diff --git a/deploy_component.sh b/deploy_component.sh new file mode 100755 index 0000000000000000000000000000000000000000..a4cf6184c83ef026562abe8e084430bba3ead9c8 --- /dev/null +++ b/deploy_component.sh @@ -0,0 +1,188 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + +# If not already set, set the URL of your local Docker registry where the images will be uploaded to. +# Leave it blank if you do not want to use any Docker registry. +export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""} +#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/" + +TFS_COMPONENTS=$1 + +# If not already set, set the tag you want to use for your images. +export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +# If not already set, set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""} + +# If not already set, set the new Grafana admin password +export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +# Constants +GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller" +TMP_FOLDER="./tmp" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" +mkdir -p $TMP_MANIFESTS_FOLDER +TMP_LOGS_FOLDER="$TMP_FOLDER/logs" +mkdir -p $TMP_LOGS_FOLDER + +echo "Deploying component and collecting environment variables..." +ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh + +for COMPONENT in $TFS_COMPONENTS; do + echo "Processing '$COMPONENT' component..." + IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g') + + echo " Building Docker image..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + elif [ "$COMPONENT" == "pathcomp" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" + docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" + docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile .
>> "$BUILD_LOG" + # next command is redundant, but helpful to keep cache updated between rebuilds + docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + else + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" + fi + + if [ -n "$TFS_REGISTRY_IMAGE" ]; then + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." + + if [ "$COMPONENT" == "pathcomp" ]; then + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL-frontend" > "$TAG_LOG" + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL-backend" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" + docker push "$IMAGE_URL-frontend" > "$PUSH_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" + docker push "$IMAGE_URL-backend" > "$PUSH_LOG" + else + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi + fi + + echo " Adapting '$COMPONENT' manifest file..." + MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml" + cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" + + if [ -n "$TFS_REGISTRY_IMAGE" ]; then + # Registry is set + if [ "$COMPONENT" == "pathcomp" ]; then + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL-frontend#g" "$MANIFEST" + + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL-backend#g" "$MANIFEST" + + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" + else + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" + fi + else + # Registry is not set + if [ "$COMPONENT" == "pathcomp" ]; then + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_NAME-frontend#g" "$MANIFEST" + + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_NAME-backend#g" "$MANIFEST" + + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST" + else + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST" + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST" + fi + fi + + # TODO: harmonize names of the monitoring component + + echo " Deploying '$COMPONENT' component to Kubernetes..." 
+ DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log" + kubectl --namespace $TFS_K8S_NAMESPACE delete -f "$MANIFEST" > "$DEPLOY_LOG" + kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG" + COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") + kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + + echo " Collecting env-vars for '$COMPONENT' component..." + + SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json) + if [ -z "${SERVICE_DATA}" ]; then continue; fi + + # Env vars for service's host address + SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP') + if [ -z "${SERVICE_HOST}" ]; then continue; fi + # TODO: remove previous value from file + ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT + + # Env vars for service's 'grpc' port (if any) + SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port') + if [ -n "${SERVICE_PORT_GRPC}" ]; then + ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT + fi + + # Env vars for service's 'http' port (if any) + SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port') + if [ -n "${SERVICE_PORT_HTTP}" ]; then + ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT + fi + + printf "\n" +done + +# For now, leave this control here. Some component dependencies are not well handled +for COMPONENT in $TFS_COMPONENTS; do + echo "Waiting for '$COMPONENT' component..." + kubectl wait --namespace $TFS_K8S_NAMESPACE \ + --for='condition=available' --timeout=300s deployment/${COMPONENT}service + printf "\n" +done + +./show_deploy.sh + +echo "Done!" diff --git a/ecoc22 b/ecoc22 new file mode 120000 index 0000000000000000000000000000000000000000..3c61895e5ac62d0b38ce058ba5ff042442542320 --- /dev/null +++ b/ecoc22 @@ -0,0 +1 @@ +src/tests/ecoc22/ \ No newline at end of file diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 171394f7c43b2447e898902c78d5276fe1bcbc7c..46c7557d9178d1bb2bc36eda13a088606f56cede 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 2020 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:2020"] diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 75832b94fa2a6ba97617641e7b249157508614bf..efe43fe229a7f7ba862b10a04d44c6e9de06b5fb 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 3030 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] diff --git a/my_deploy.sh b/my_deploy.sh index 67a2e0558c25d767e14b635e6dd9174433827156..e70a12e1556ab06f6daa89c316c6a6ed61c4e059 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -1,13 +1,13 @@ # Set the URL of your local Docker registry where the images will be uploaded to.
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" -# Set the list of components, separated by comas, you want to build images for, and deploy. +# Set the list of components, separated by spaces, that you want to build images for and deploy. # Supported components are: # context device automation policy service compute monitoring webui # interdomain slice pathcomp dlt -# dbscanserving opticalattackmitigator opticalcentralizedattackdetector +# dbscanserving opticalattackmitigator opticalattackdetector # l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector -export TFS_COMPONENTS="context device automation service compute monitoring webui" +export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" diff --git a/proto/context.proto b/proto/context.proto index 866876175f108c056f7e35c6457a1bf48a226a9c..97d6df666fd54f3b855c704fae3c792b37639382 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -188,6 +188,7 @@ message DeviceList { message DeviceEvent { Event event = 1; DeviceId device_id = 2; + DeviceConfig device_config = 3; } diff --git a/proto/monitoring.proto b/proto/monitoring.proto index ea7f532bbb3aa6a9e9bcb2223f85619c5ae851f8..5eb30651ee14c9c5f0215008ea517f87f94dd285 100644 --- a/proto/monitoring.proto +++ b/proto/monitoring.proto @@ -99,7 +99,7 @@ message KpiValue { message KpiList { - repeated Kpi kpi_list = 1; + repeated Kpi kpi = 1; } message KpiDescriptorList { diff --git a/run_tests_docker.sh b/run_tests_docker.sh new file mode 100755 index 0000000000000000000000000000000000000000..fd885140999ac0f045c162f361f0075af96a8d48 --- /dev/null +++ b/run_tests_docker.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# Set the URL of your local Docker registry where the images will be uploaded to. Leave it blank if you do not want to +# use any Docker registry. +REGISTRY_IMAGE="" +#REGISTRY_IMAGE="http://my-container-registry.local/" + +# Set the list of components you want to build images for, and deploy. +COMPONENTS="context device automation policy service compute monitoring centralizedattackdetector" + +# Set the tag you want to use for your images. +IMAGE_TAG="tf-dev" + +# Constants +TMP_FOLDER="./tmp" + +TMP_LOGS_FOLDER="$TMP_FOLDER/logs" +mkdir -p $TMP_LOGS_FOLDER + +for COMPONENT in $COMPONENTS; do + echo "Processing '$COMPONENT' component..." + IMAGE_NAME="$COMPONENT:$IMAGE_TAG" + IMAGE_URL="$REGISTRY_IMAGE/$IMAGE_NAME" + + echo " Building Docker image..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + else + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG" + fi + + if [ -n "$REGISTRY_IMAGE" ]; then + echo "Pushing Docker image to '$REGISTRY_IMAGE'..." + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi +done + +echo "Preparing to run the tests..."
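For reference, a minimal sketch of one iteration of the test loop that follows, with assumed names (component `context`, tag `tf-dev`); note that `docker run` options such as `--network` must precede the image name, since anything placed after the image is passed to the container as its command:

```bash
# Create the bridge network once; ignore the error if it already exists.
docker network create -d bridge teraflowbridge 2>/dev/null || true

# Start the component detached on that network (assumed image context:tf-dev),
# run its unit tests inside the container, then stop it.
docker run -d --name context --network=teraflowbridge context:tf-dev
docker exec -it context bash -c "pytest --log-level=DEBUG --verbose context/tests/test_unitary.py"
docker stop context
```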
+ +if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + +for COMPONENT in $COMPONENTS; do + IMAGE_NAME="$COMPONENT:$IMAGE_TAG" + echo " Running tests for $COMPONENT:" + docker run -it -d --name $COMPONENT --network=teraflowbridge $IMAGE_NAME + docker exec -it $COMPONENT bash -c "pytest --log-level=DEBUG --verbose $COMPONENT/tests/test_unitary.py" + docker stop $COMPONENT +done diff --git a/scripts/build_run_report_tests_locally.sh b/scripts/build_run_report_tests_locally.sh new file mode 100755 index 0000000000000000000000000000000000000000..9bdc81d9894df35a6bcc325d78e7f1f5214e8a96 --- /dev/null +++ b/scripts/build_run_report_tests_locally.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "component name required but not provided" + +COMPONENT_NAME=$1 # parameter +IMAGE_NAME="${COMPONENT_NAME}-local" +IMAGE_TAG="latest" + +if docker ps | grep $IMAGE_NAME +then + docker stop $IMAGE_NAME +fi + +if docker network list | grep teraflowbridge +then + echo "teraflowbridge is already created" +else + docker network create -d bridge teraflowbridge +fi + +docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$COMPONENT_NAME/Dockerfile . + +docker run --name $IMAGE_NAME -d -v "${PWD}/src/${COMPONENT_NAME}/tests:/home/${COMPONENT_NAME}/results" --network=teraflowbridge --rm $IMAGE_NAME:$IMAGE_TAG + +docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $COMPONENT_NAME/tests/ --junitxml=/home/${COMPONENT_NAME}/results/${COMPONENT_NAME}_report.xml" + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc + +echo +echo "Coverage report:" +echo "----------------" +docker exec -i $IMAGE_NAME bash -c "coverage report --include='${COMPONENT_NAME}/*' --show-missing" + +# docker stop $IMAGE_NAME +docker rm -f $IMAGE_NAME +docker network rm teraflowbridge diff --git a/scripts/dump_logs.sh b/scripts/dump_logs.sh new file mode 100755 index 0000000000000000000000000000000000000000..a6db945d245b832564353de71610bf720eb0acb8 --- /dev/null +++ b/scripts/dump_logs.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
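A minimal usage sketch for the local build/test/report helper above, assuming `device` as the component (any component under `./src` should work): the script builds `device-local:latest`, runs the tests on the `teraflowbridge` network, and leaves the JUnit report on the host:

```bash
# Assumed component name 'device'.
./scripts/build_run_report_tests_locally.sh device

# The bind mount in the script maps the container's results folder back to
# the host, so the report lands at:
ls src/device/tests/device_report.xml
```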
+ +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +mkdir -p tmp/exec_logs/$TFS_K8S_NAMESPACE/ +rm tmp/exec_logs/$TFS_K8S_NAMESPACE/* + +PODS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE --no-headers --output=custom-columns=":metadata.name") +for POD in $PODS; do + CONTAINERS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE $POD -o jsonpath='{.spec.containers[*].name}') + for CONTAINER in $CONTAINERS; do + kubectl --namespace $TFS_K8S_NAMESPACE logs pod/${POD} --container ${CONTAINER} \ + > tmp/exec_logs/$TFS_K8S_NAMESPACE/$POD\_\_$CONTAINER.log + done +done diff --git a/scripts/run_tests_locally-service.sh b/scripts/run_tests_locally-service.sh index 8a2a8d0be1d1960c6197a67e471ae29abba501a7..8816b9faa24e55e486a54852632fdb8e00db1d04 100755 --- a/scripts/run_tests_locally-service.sh +++ b/scripts/run_tests_locally-service.sh @@ -21,4 +21,5 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc # Run unitary tests and analyze coverage of code at same time coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + service/tests/test_unitary_task_scheduler.py \ service/tests/test_unitary.py diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh index 5978035127735c20ddc6387666a5434cbac61ff8..4bafc6daaa1088cb6ab2b401ae3ce4927afacf46 100755 --- a/scripts/show_logs_monitoring.sh +++ b/scripts/show_logs_monitoring.sh @@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"} # Automated steps start here ######################################################################################################################## -kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringserver diff --git a/scripts/show_logs_slice.sh b/scripts/show_logs_slice.sh new file mode 100755 index 0000000000000000000000000000000000000000..c7bc0b69588307092b22ea3c600669359f04de99 --- /dev/null +++ b/scripts/show_logs_slice.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/sliceservice diff --git a/src/automation/README.md b/src/automation/README.md index 099980bcc4172bf9e5c2d59459f40ae4331696cf..e98d2b8ab62563f43cf2c1011e91fb2a1d08d378 100644 --- a/src/automation/README.md +++ b/src/automation/README.md @@ -1,28 +1,57 @@ -# Automation TeraFlow OS service +# TeraFlowSDN Automation service -The Automation service, also known as Zero-Touch Provisioning (ZTP), is tested on Ubuntu 20.04. Follow the instructions below to build, test, and run this service on your local environment. +This repository hosts the TeraFlowSDN Automation service, also known as the Zero-Touch Provisioning (ZTP) service. +Follow the instructions below to build, test, and run this service on your local environment. -## Automation Teraflow OS service architecture +## TeraFlowSDN Automation service architecture -| The Automation Teraflow OS service architecture consists of six (6) interfaces listed below: | +The TeraFlowSDN Automation architecture consists of six (6) interfaces listed below: + +Interfaces | |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 1. The `AutomationGateway` interface that implements all the rpc functions that are described in `automation.proto` file. | -| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client and implements all the rpc functions that are described in `context.proto` file. | -| 3. The `DeviceGateway` interface that communicates with a `Device` Service gRPC client and implements all the rpc functions that are described in `device.proto` file. | -| 4. The `AutomationService` interface that implements the `addDevice()` method by communicating with a `Context` gRPC client & a `Device` gRPC client through the use of `ContextService` interface & `DeviceService` interface respectively. | -| 5. The `ContextService` interface that implements the `getDevice()` & `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface. | -| 6. The `DeviceService` interface that implements the `getInitialConfiguration()` & `configureDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface. | +| 1. The `AutomationGateway` interface that implements all the RPC functions that are described in `automation.proto` file. | +| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client to invoke key RPC functions described in `context.proto` file. | +| 3.
The `DeviceGateway` interface that communicates with a `Device` Service gRPC client to invoke key RPC functions described in `device.proto` file. | +| 4. The `AutomationService` interface that implements the `addDevice()`, `updateDevice()`, and `deleteDevice()` methods by communicating with a `Context` gRPC client and a `Device` gRPC client through the use of `ContextService` interface and `DeviceService` interface respectively. | +| 5. The `ContextService` interface that implements the `getDevice()` and `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface. | +| 6. The `DeviceService` interface that implements the `getInitialConfiguration()`, `configureDevice()`, and `deleteDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface. | + + +## Prerequisites +The Automation service is currently tested against Ubuntu 20.04 and Java 11. -## Run with dev profile +To quickly install Java 11 on a Debian-based Linux distro do: ```bash -./mvnw clean quarkus:dev +sudo apt-get install openjdk-11-jdk -y ``` -## Running tests +Feel free to try more recent Java versions. + +## Compile + +```bash +./mvnw compile +``` + +## Run tests + +```bash +./mvnw test +``` -Run unit and functional tests `./mvnw clean test` + +## Run service + +```bash +./mvnw quarkus:dev +``` + +## Clean + +```bash +./mvnw clean +``` ## Deploying on a Kubernetes cluster To create the K8s manifest file under `target/kubernetes/kubernetes.yml` to be used for the deployment, run ```bash ./mvnw clean package -DskipUTs -DskipITs -``` +``` To deploy the application in a K8s cluster run ```bash kubectl apply -f "manifests/automationservice.yaml" ``` + +## Maintainers + +This TeraFlowSDN service is implemented by [UBITECH](https://www.ubitech.eu). + +Feel free to contact Georgios Katsikas (gkatsikas at ubitech dot eu) in case you have questions.
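The Serializer and DeviceEvent changes below consume the new `device_config` field that this patch adds to `DeviceEvent` in `context.proto`. A sketch of how to observe the field end to end, assuming `grpcurl` is installed, the context service exposes gRPC server reflection, and the host/port variables come from the `tfs_runtime_env_vars.sh` file written by `deploy_component.sh` (all assumptions):

```bash
# Load the env-vars collected during deployment (names generated by the
# deploy script, e.g. CONTEXTSERVICE_SERVICE_HOST / ..._SERVICE_PORT_GRPC).
source tfs_runtime_env_vars.sh

# Stream device events; with this patch each DeviceEvent carries a
# 'deviceConfig' with its 'configRules', so a subscriber such as the
# Automation (ZTP) service can read the changed rules straight from the event.
grpcurl -plaintext -d '{}' \
  ${CONTEXTSERVICE_SERVICE_HOST}:${CONTEXTSERVICE_SERVICE_PORT_GRPC} \
  context.ContextService/GetDeviceEvents
```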
diff --git a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java index 2b163fdff1a29c26f98380a0c3b19666a86749fe..a281e221fb3098a76bae737b60e82d65c142d4e6 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java +++ b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java @@ -217,6 +217,7 @@ public class Serializer { builder.setDeviceId(deviceId); builder.setEvent(serialize(deviceEvent.getEvent())); + builder.setDeviceConfig(serialize(deviceEvent.getDeviceConfig().orElse(null))); return builder.build(); } @@ -224,8 +225,9 @@ public class Serializer { public DeviceEvent deserialize(ContextOuterClass.DeviceEvent deviceEvent) { final var deviceId = deserialize(deviceEvent.getDeviceId()); final var event = deserialize(deviceEvent.getEvent()); + final var deviceConfig = deserialize(deviceEvent.getDeviceConfig()); - return new DeviceEvent(deviceId, event); + return new DeviceEvent(deviceId, event, deviceConfig); } public ContextOuterClass.ConfigActionEnum serialize(ConfigActionEnum configAction) { diff --git a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java index efc0be8308fb9a75132cd604a84fd5b4822f3af7..526b9b7b2ba34edc6d538619bdb190a9aefa9d97 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java +++ b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java @@ -16,14 +16,23 @@ package eu.teraflow.automation.context.model; +import java.util.Optional; + public class DeviceEvent { private final Event event; private final String deviceId; + private final Optional<DeviceConfig> deviceConfig; public DeviceEvent(String deviceId, Event event) { + this(deviceId, event, null); + } + + public DeviceEvent(String deviceId, Event event, DeviceConfig deviceConfig) { this.event = event; this.deviceId = deviceId; + this.deviceConfig = + (deviceConfig == null) ?
Optional.empty() : Optional.ofNullable(deviceConfig); } public Event getEvent() { @@ -34,8 +43,14 @@ public class DeviceEvent { return deviceId; } + public Optional<DeviceConfig> getDeviceConfig() { + return deviceConfig; + } + @Override public String toString() { - return String.format("%s[%s, %s]", getClass().getSimpleName(), deviceId, event.toString()); + return String.format( + "%s[%s, %s, %s]", + getClass().getSimpleName(), deviceId, event.toString(), deviceConfig.orElse(null)); } } diff --git a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java index 74f8f301ca7d4db904d9092e8f860fc4dc171a51..63f0eb45fdf0c287b68300db84ef1ef7f88418ba 100644 --- a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java +++ b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java @@ -308,14 +308,51 @@ class SerializerTest { .setTimestamp(expectedTimestamp) .setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_CREATE) .build(); + + final var expectedConfigRuleCustomA = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyA") + .setResourceValue("resourceValueA") + .build(); + + final var expectedConfigRuleCustomB = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyB") + .setResourceValue("resourceValueB") + .build(); + + final var expectedConfigRuleA = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET) + .setCustom(expectedConfigRuleCustomA) + .build(); + final var expectedConfigRuleB = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE) + .setCustom(expectedConfigRuleCustomB) + .build(); + + final var expectedDeviceConfig = + ContextOuterClass.DeviceConfig.newBuilder() + .addAllConfigRules(List.of(expectedConfigRuleA, expectedConfigRuleB)) + .build(); + final var expectedDeviceEvent = ContextOuterClass.DeviceEvent.newBuilder() .setDeviceId(expectedDeviceId) .setEvent(expectedEvent) + .setDeviceConfig(expectedDeviceConfig) .build(); final var creationEvent = new Event(1, EventTypeEnum.CREATE); - final var deviceEvent = new DeviceEvent("deviceId", creationEvent); + final var configRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA"); + final var configRuleCustomB = new ConfigRuleCustom("resourceKeyB", "resourceValueB"); + final var configRuleTypeA = new ConfigRuleTypeCustom(configRuleCustomA); + final var configRuleTypeB = new ConfigRuleTypeCustom(configRuleCustomB); + final var configRuleA = new ConfigRule(ConfigActionEnum.SET, configRuleTypeA); + final var configRuleB = new ConfigRule(ConfigActionEnum.DELETE, configRuleTypeB); + final var deviceConfig = new DeviceConfig(List.of(configRuleA, configRuleB)); + final var deviceEvent = new DeviceEvent("deviceId", creationEvent, deviceConfig); final var serializedDeviceEvent = serializer.serialize(deviceEvent); assertThat(serializedDeviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent); @@ -328,7 +365,22 @@ class SerializerTest { final var expectedTimestamp = ContextOuterClass.Timestamp.newBuilder().setTimestamp(1).build(); final var creationEvent = new Event(1, expectedEventType); - final var expectedDeviceEvent = new DeviceEvent(dummyDeviceId, creationEvent); + + final var expectedConfigRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA"); + final var expectedConfigRuleCustomB = new ConfigRuleCustom("resourceKeyB",
"resourceValueB"); + + final var expectedConfigRuleTypeA = new ConfigRuleTypeCustom(expectedConfigRuleCustomA); + final var expectedConfigRuleTypeB = new ConfigRuleTypeCustom(expectedConfigRuleCustomB); + + final var expectedConfigRuleA = new ConfigRule(ConfigActionEnum.SET, expectedConfigRuleTypeA); + final var expectedConfigRuleB = + new ConfigRule(ConfigActionEnum.DELETE, expectedConfigRuleTypeB); + + final var expectedDeviceConfig = + new DeviceConfig(List.of(expectedConfigRuleA, expectedConfigRuleB)); + + final var expectedDeviceEvent = + new DeviceEvent(dummyDeviceId, creationEvent, expectedDeviceConfig); final var deviceUuid = Uuid.newBuilder().setUuid("deviceId"); final var deviceId = DeviceId.newBuilder().setDeviceUuid(deviceUuid).build(); @@ -337,8 +389,38 @@ class SerializerTest { .setTimestamp(expectedTimestamp) .setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_REMOVE) .build(); + + final var configRuleCustomA = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyA") + .setResourceValue("resourceValueA") + .build(); + final var configRuleCustomB = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyB") + .setResourceValue("resourceValueB") + .build(); + final var configRuleA = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET) + .setCustom(configRuleCustomA) + .build(); + final var configRuleB = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE) + .setCustom(configRuleCustomB) + .build(); + final var deviceConfig = + ContextOuterClass.DeviceConfig.newBuilder() + .addAllConfigRules(List.of(configRuleA, configRuleB)) + .build(); + final var serializedDeviceEvent = - ContextOuterClass.DeviceEvent.newBuilder().setDeviceId(deviceId).setEvent(event).build(); + ContextOuterClass.DeviceEvent.newBuilder() + .setDeviceId(deviceId) + .setEvent(event) + .setDeviceConfig(deviceConfig) + .build(); final var deviceEvent = serializer.deserialize(serializedDeviceEvent); assertThat(deviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent); diff --git a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java index 45a64fabb43bab645e97e9d80bc1825242006dce..3c0d7ce36fcdc4e47697ba11a4ceb3d8e8cdea0c 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java +++ b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java @@ -17331,6 +17331,21 @@ public final class ContextOuterClass { * .context.DeviceId device_id = 2; */ context.ContextOuterClass.DeviceIdOrBuilder getDeviceIdOrBuilder(); + + /** + * .context.DeviceConfig device_config = 3; + * @return Whether the deviceConfig field is set. + */ + boolean hasDeviceConfig(); + /** + * .context.DeviceConfig device_config = 3; + * @return The deviceConfig. 
+ */ + context.ContextOuterClass.DeviceConfig getDeviceConfig(); + /** + * .context.DeviceConfig device_config = 3; + */ + context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder(); } /** * Protobuf type {@code context.DeviceEvent} @@ -17403,6 +17418,19 @@ public final class ContextOuterClass { break; } + case 26: { + context.ContextOuterClass.DeviceConfig.Builder subBuilder = null; + if (deviceConfig_ != null) { + subBuilder = deviceConfig_.toBuilder(); + } + deviceConfig_ = input.readMessage(context.ContextOuterClass.DeviceConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(deviceConfig_); + deviceConfig_ = subBuilder.buildPartial(); + } + + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -17487,6 +17515,32 @@ public final class ContextOuterClass { return getDeviceId(); } + public static final int DEVICE_CONFIG_FIELD_NUMBER = 3; + private context.ContextOuterClass.DeviceConfig deviceConfig_; + /** + * .context.DeviceConfig device_config = 3; + * @return Whether the deviceConfig field is set. + */ + @java.lang.Override + public boolean hasDeviceConfig() { + return deviceConfig_ != null; + } + /** + * .context.DeviceConfig device_config = 3; + * @return The deviceConfig. + */ + @java.lang.Override + public context.ContextOuterClass.DeviceConfig getDeviceConfig() { + return deviceConfig_ == null ? context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_; + } + /** + * .context.DeviceConfig device_config = 3; + */ + @java.lang.Override + public context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder() { + return getDeviceConfig(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -17507,6 +17561,9 @@ public final class ContextOuterClass { if (deviceId_ != null) { output.writeMessage(2, getDeviceId()); } + if (deviceConfig_ != null) { + output.writeMessage(3, getDeviceConfig()); + } unknownFields.writeTo(output); } @@ -17524,6 +17581,10 @@ public final class ContextOuterClass { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, getDeviceId()); } + if (deviceConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getDeviceConfig()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -17549,6 +17610,11 @@ public final class ContextOuterClass { if (!getDeviceId() .equals(other.getDeviceId())) return false; } + if (hasDeviceConfig() != other.hasDeviceConfig()) return false; + if (hasDeviceConfig()) { + if (!getDeviceConfig() + .equals(other.getDeviceConfig())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -17568,6 +17634,10 @@ public final class ContextOuterClass { hash = (37 * hash) + DEVICE_ID_FIELD_NUMBER; hash = (53 * hash) + getDeviceId().hashCode(); } + if (hasDeviceConfig()) { + hash = (37 * hash) + DEVICE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getDeviceConfig().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -17713,6 +17783,12 @@ public final class ContextOuterClass { deviceId_ = null; deviceIdBuilder_ = null; } + if (deviceConfigBuilder_ == null) { + deviceConfig_ = null; + } else { + deviceConfig_ = null; + deviceConfigBuilder_ = null; + } return this; } @@ -17749,6 +17825,11 @@ public final class ContextOuterClass { } else { result.deviceId_ = deviceIdBuilder_.build(); } + if 
(deviceConfigBuilder_ == null) { + result.deviceConfig_ = deviceConfig_; + } else { + result.deviceConfig_ = deviceConfigBuilder_.build(); + } onBuilt(); return result; } @@ -17803,6 +17884,9 @@ public final class ContextOuterClass { if (other.hasDeviceId()) { mergeDeviceId(other.getDeviceId()); } + if (other.hasDeviceConfig()) { + mergeDeviceConfig(other.getDeviceConfig()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -18069,6 +18153,125 @@ public final class ContextOuterClass { } return deviceIdBuilder_; } + + private context.ContextOuterClass.DeviceConfig deviceConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder> deviceConfigBuilder_; + /** + * .context.DeviceConfig device_config = 3; + * @return Whether the deviceConfig field is set. + */ + public boolean hasDeviceConfig() { + return deviceConfigBuilder_ != null || deviceConfig_ != null; + } + /** + * .context.DeviceConfig device_config = 3; + * @return The deviceConfig. + */ + public context.ContextOuterClass.DeviceConfig getDeviceConfig() { + if (deviceConfigBuilder_ == null) { + return deviceConfig_ == null ? context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_; + } else { + return deviceConfigBuilder_.getMessage(); + } + } + /** + * .context.DeviceConfig device_config = 3; + */ + public Builder setDeviceConfig(context.ContextOuterClass.DeviceConfig value) { + if (deviceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + deviceConfig_ = value; + onChanged(); + } else { + deviceConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * .context.DeviceConfig device_config = 3; + */ + public Builder setDeviceConfig( + context.ContextOuterClass.DeviceConfig.Builder builderForValue) { + if (deviceConfigBuilder_ == null) { + deviceConfig_ = builderForValue.build(); + onChanged(); + } else { + deviceConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .context.DeviceConfig device_config = 3; + */ + public Builder mergeDeviceConfig(context.ContextOuterClass.DeviceConfig value) { + if (deviceConfigBuilder_ == null) { + if (deviceConfig_ != null) { + deviceConfig_ = + context.ContextOuterClass.DeviceConfig.newBuilder(deviceConfig_).mergeFrom(value).buildPartial(); + } else { + deviceConfig_ = value; + } + onChanged(); + } else { + deviceConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .context.DeviceConfig device_config = 3; + */ + public Builder clearDeviceConfig() { + if (deviceConfigBuilder_ == null) { + deviceConfig_ = null; + onChanged(); + } else { + deviceConfig_ = null; + deviceConfigBuilder_ = null; + } + + return this; + } + /** + * .context.DeviceConfig device_config = 3; + */ + public context.ContextOuterClass.DeviceConfig.Builder getDeviceConfigBuilder() { + + onChanged(); + return getDeviceConfigFieldBuilder().getBuilder(); + } + /** + * .context.DeviceConfig device_config = 3; + */ + public context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder() { + if (deviceConfigBuilder_ != null) { + return deviceConfigBuilder_.getMessageOrBuilder(); + } else { + return deviceConfig_ == null ? 
+ context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_; + } + } + /** + * .context.DeviceConfig device_config = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder> + getDeviceConfigFieldBuilder() { + if (deviceConfigBuilder_ == null) { + deviceConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder>( + getDeviceConfig(), + getParentForChildren(), + isClean()); + deviceConfig_ = null; + } + return deviceConfigBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -61981,230 +62184,234 @@ public final class ContextOuterClass { "(\0132\023.context.ConfigRule\"5\n\014DeviceIdList\022" + "%\n\ndevice_ids\030\001 \003(\0132\021.context.DeviceId\"." + "\n\nDeviceList\022 \n\007devices\030\001 \003(\0132\017.context." + - "Device\"R\n\013DeviceEvent\022\035\n\005event\030\001 \001(\0132\016.c" + - "ontext.Event\022$\n\tdevice_id\030\002 \001(\0132\021.contex" + - "t.DeviceId\"*\n\006LinkId\022 \n\tlink_uuid\030\001 \001(\0132" + - "\r.context.Uuid\"X\n\004Link\022 \n\007link_id\030\001 \001(\0132" + - "\017.context.LinkId\022.\n\021link_endpoint_ids\030\002 " + - "\003(\0132\023.context.EndPointId\"/\n\nLinkIdList\022!" + - "\n\010link_ids\030\001 \003(\0132\017.context.LinkId\"(\n\010Lin" + - "kList\022\034\n\005links\030\001 \003(\0132\r.context.Link\"L\n\tL" + - "inkEvent\022\035\n\005event\030\001 \001(\0132\016.context.Event\022" + - " \n\007link_id\030\002 \001(\0132\017.context.LinkId\"X\n\tSer" + - "viceId\022&\n\ncontext_id\030\001 \001(\0132\022.context.Con" + - "textId\022#\n\014service_uuid\030\002 \001(\0132\r.context.U" + - "uid\"\315\002\n\007Service\022&\n\nservice_id\030\001 \001(\0132\022.co" + - "ntext.ServiceId\022.\n\014service_type\030\002 \001(\0162\030." 
+ - "context.ServiceTypeEnum\0221\n\024service_endpo" + - "int_ids\030\003 \003(\0132\023.context.EndPointId\0220\n\023se" + - "rvice_constraints\030\004 \003(\0132\023.context.Constr" + - "aint\022.\n\016service_status\030\005 \001(\0132\026.context.S" + - "erviceStatus\022.\n\016service_config\030\006 \001(\0132\026.c" + - "ontext.ServiceConfig\022%\n\ttimestamp\030\007 \001(\0132" + - "\022.context.Timestamp\"C\n\rServiceStatus\0222\n\016" + - "service_status\030\001 \001(\0162\032.context.ServiceSt" + - "atusEnum\":\n\rServiceConfig\022)\n\014config_rule" + - "s\030\001 \003(\0132\023.context.ConfigRule\"8\n\rServiceI" + - "dList\022\'\n\013service_ids\030\001 \003(\0132\022.context.Ser" + - "viceId\"1\n\013ServiceList\022\"\n\010services\030\001 \003(\0132" + - "\020.context.Service\"U\n\014ServiceEvent\022\035\n\005eve" + - "nt\030\001 \001(\0132\016.context.Event\022&\n\nservice_id\030\002" + - " \001(\0132\022.context.ServiceId\"T\n\007SliceId\022&\n\nc" + - "ontext_id\030\001 \001(\0132\022.context.ContextId\022!\n\ns" + - "lice_uuid\030\002 \001(\0132\r.context.Uuid\"\222\003\n\005Slice" + - "\022\"\n\010slice_id\030\001 \001(\0132\020.context.SliceId\022/\n\022" + - "slice_endpoint_ids\030\002 \003(\0132\023.context.EndPo" + - "intId\022.\n\021slice_constraints\030\003 \003(\0132\023.conte" + - "xt.Constraint\022-\n\021slice_service_ids\030\004 \003(\013" + - "2\022.context.ServiceId\022,\n\022slice_subslice_i" + - "ds\030\005 \003(\0132\020.context.SliceId\022*\n\014slice_stat" + - "us\030\006 \001(\0132\024.context.SliceStatus\022*\n\014slice_" + - "config\030\007 \001(\0132\024.context.SliceConfig\022(\n\013sl" + - "ice_owner\030\010 \001(\0132\023.context.SliceOwner\022%\n\t" + - "timestamp\030\t \001(\0132\022.context.Timestamp\"E\n\nS" + - "liceOwner\022!\n\nowner_uuid\030\001 \001(\0132\r.context." 
+ - "Uuid\022\024\n\014owner_string\030\002 \001(\t\"=\n\013SliceStatu" + - "s\022.\n\014slice_status\030\001 \001(\0162\030.context.SliceS" + - "tatusEnum\"8\n\013SliceConfig\022)\n\014config_rules" + - "\030\001 \003(\0132\023.context.ConfigRule\"2\n\013SliceIdLi" + - "st\022#\n\tslice_ids\030\001 \003(\0132\020.context.SliceId\"" + - "+\n\tSliceList\022\036\n\006slices\030\001 \003(\0132\016.context.S" + - "lice\"O\n\nSliceEvent\022\035\n\005event\030\001 \001(\0132\016.cont" + - "ext.Event\022\"\n\010slice_id\030\002 \001(\0132\020.context.Sl" + - "iceId\"6\n\014ConnectionId\022&\n\017connection_uuid" + - "\030\001 \001(\0132\r.context.Uuid\"2\n\025ConnectionSetti" + - "ngs_L0\022\031\n\021lsp_symbolic_name\030\001 \001(\t\"\236\001\n\025Co" + - "nnectionSettings_L2\022\027\n\017src_mac_address\030\001" + - " \001(\t\022\027\n\017dst_mac_address\030\002 \001(\t\022\022\n\nether_t" + - "ype\030\003 \001(\r\022\017\n\007vlan_id\030\004 \001(\r\022\022\n\nmpls_label" + - "\030\005 \001(\r\022\032\n\022mpls_traffic_class\030\006 \001(\r\"t\n\025Co" + - "nnectionSettings_L3\022\026\n\016src_ip_address\030\001 " + - "\001(\t\022\026\n\016dst_ip_address\030\002 \001(\t\022\014\n\004dscp\030\003 \001(" + - "\r\022\020\n\010protocol\030\004 \001(\r\022\013\n\003ttl\030\005 \001(\r\"[\n\025Conn" + - "ectionSettings_L4\022\020\n\010src_port\030\001 \001(\r\022\020\n\010d" + - "st_port\030\002 \001(\r\022\021\n\ttcp_flags\030\003 \001(\r\022\013\n\003ttl\030" + - "\004 \001(\r\"\304\001\n\022ConnectionSettings\022*\n\002l0\030\001 \001(\013" + - "2\036.context.ConnectionSettings_L0\022*\n\002l2\030\002" + - " \001(\0132\036.context.ConnectionSettings_L2\022*\n\002" + - "l3\030\003 \001(\0132\036.context.ConnectionSettings_L3" + - "\022*\n\002l4\030\004 \001(\0132\036.context.ConnectionSetting" + - "s_L4\"\363\001\n\nConnection\022,\n\rconnection_id\030\001 \001" + - "(\0132\025.context.ConnectionId\022&\n\nservice_id\030" + - "\002 \001(\0132\022.context.ServiceId\0223\n\026path_hops_e" + - "ndpoint_ids\030\003 \003(\0132\023.context.EndPointId\022+" + - "\n\017sub_service_ids\030\004 \003(\0132\022.context.Servic" + - "eId\022-\n\010settings\030\005 \001(\0132\033.context.Connecti" + - "onSettings\"A\n\020ConnectionIdList\022-\n\016connec" + - "tion_ids\030\001 \003(\0132\025.context.ConnectionId\":\n" + - "\016ConnectionList\022(\n\013connections\030\001 \003(\0132\023.c" + - "ontext.Connection\"^\n\017ConnectionEvent\022\035\n\005" + - "event\030\001 \001(\0132\016.context.Event\022,\n\rconnectio" + - "n_id\030\002 \001(\0132\025.context.ConnectionId\"\202\001\n\nEn" + - "dPointId\022(\n\013topology_id\030\001 \001(\0132\023.context." + - "TopologyId\022$\n\tdevice_id\030\002 \001(\0132\021.context." 
+ - "DeviceId\022$\n\rendpoint_uuid\030\003 \001(\0132\r.contex" + - "t.Uuid\"\264\001\n\010EndPoint\022(\n\013endpoint_id\030\001 \001(\013" + - "2\023.context.EndPointId\022\025\n\rendpoint_type\030\002" + - " \001(\t\0229\n\020kpi_sample_types\030\003 \003(\0162\037.kpi_sam" + - "ple_types.KpiSampleType\022,\n\021endpoint_loca" + - "tion\030\004 \001(\0132\021.context.Location\"A\n\021ConfigR" + - "ule_Custom\022\024\n\014resource_key\030\001 \001(\t\022\026\n\016reso" + - "urce_value\030\002 \001(\t\"]\n\016ConfigRule_ACL\022(\n\013en" + - "dpoint_id\030\001 \001(\0132\023.context.EndPointId\022!\n\010" + - "rule_set\030\002 \001(\0132\017.acl.AclRuleSet\"\234\001\n\nConf" + - "igRule\022)\n\006action\030\001 \001(\0162\031.context.ConfigA" + - "ctionEnum\022,\n\006custom\030\002 \001(\0132\032.context.Conf" + - "igRule_CustomH\000\022&\n\003acl\030\003 \001(\0132\027.context.C" + - "onfigRule_ACLH\000B\r\n\013config_rule\"F\n\021Constr" + - "aint_Custom\022\027\n\017constraint_type\030\001 \001(\t\022\030\n\020" + - "constraint_value\030\002 \001(\t\"E\n\023Constraint_Sch" + - "edule\022\027\n\017start_timestamp\030\001 \001(\002\022\025\n\rdurati" + - "on_days\030\002 \001(\002\"3\n\014GPS_Position\022\020\n\010latitud" + - "e\030\001 \001(\002\022\021\n\tlongitude\030\002 \001(\002\"W\n\010Location\022\020" + - "\n\006region\030\001 \001(\tH\000\022-\n\014gps_position\030\002 \001(\0132\025" + - ".context.GPS_PositionH\000B\n\n\010location\"l\n\033C" + - "onstraint_EndPointLocation\022(\n\013endpoint_i" + - "d\030\001 \001(\0132\023.context.EndPointId\022#\n\010location" + - "\030\002 \001(\0132\021.context.Location\"Y\n\033Constraint_" + - "EndPointPriority\022(\n\013endpoint_id\030\001 \001(\0132\023." + - "context.EndPointId\022\020\n\010priority\030\002 \001(\r\"0\n\026" + - "Constraint_SLA_Latency\022\026\n\016e2e_latency_ms" + - "\030\001 \001(\002\"0\n\027Constraint_SLA_Capacity\022\025\n\rcap" + - "acity_gbps\030\001 \001(\002\"M\n\033Constraint_SLA_Avail" + - "ability\022\032\n\022num_disjoint_paths\030\001 \001(\r\022\022\n\na" + - "ll_active\030\002 \001(\010\"V\n\036Constraint_SLA_Isolat" + - "ion_level\0224\n\017isolation_level\030\001 \003(\0162\033.con" + - "text.IsolationLevelEnum\"\366\003\n\nConstraint\022," + - "\n\006custom\030\001 \001(\0132\032.context.Constraint_Cust" + - "omH\000\0220\n\010schedule\030\002 \001(\0132\034.context.Constra" + - "int_ScheduleH\000\022A\n\021endpoint_location\030\003 \001(" + - "\0132$.context.Constraint_EndPointLocationH" + - "\000\022A\n\021endpoint_priority\030\004 \001(\0132$.context.C" + - "onstraint_EndPointPriorityH\000\0228\n\014sla_capa" + - "city\030\005 \001(\0132 .context.Constraint_SLA_Capa" + - "cityH\000\0226\n\013sla_latency\030\006 \001(\0132\037.context.Co" + - "nstraint_SLA_LatencyH\000\022@\n\020sla_availabili" + - "ty\030\007 \001(\0132$.context.Constraint_SLA_Availa" + - "bilityH\000\022@\n\rsla_isolation\030\010 \001(\0132\'.contex" + - "t.Constraint_SLA_Isolation_levelH\000B\014\n\nco" + - "nstraint\"^\n\022TeraFlowController\022&\n\ncontex" + - "t_id\030\001 \001(\0132\022.context.ContextId\022\022\n\nip_add" + - "ress\030\002 \001(\t\022\014\n\004port\030\003 \001(\r\"U\n\024Authenticati" + - "onResult\022&\n\ncontext_id\030\001 \001(\0132\022.context.C" + - "ontextId\022\025\n\rauthenticated\030\002 \001(\010*j\n\rEvent" + - "TypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000\022\024\n\020EVE" + - "NTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDATE\020\002\022\024\n" + - 
"\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDriverEnum" + - "\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027DEVICEDR" + - "IVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER_TRANSP" + - "ORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n\"DEVICE" + - "DRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n\027DEVICE" + - "DRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOperationa" + - "lStatusEnum\022%\n!DEVICEOPERATIONALSTATUS_U" + - "NDEFINED\020\000\022$\n DEVICEOPERATIONALSTATUS_DI" + - "SABLED\020\001\022#\n\037DEVICEOPERATIONALSTATUS_ENAB" + - "LED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERVICETYPE" + - "_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022\024\n\020SERV" + - "ICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAPI_CONNE" + - "CTIVITY_SERVICE\020\003*\250\001\n\021ServiceStatusEnum\022" + - "\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025SERVICES" + - "TATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_ACTIVE\020" + - "\002\022!\n\035SERVICESTATUS_PENDING_REMOVAL\020\003\022\036\n\032" + - "SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017SliceSt" + - "atusEnum\022\031\n\025SLICESTATUS_UNDEFINED\020\000\022\027\n\023S" + - "LICESTATUS_PLANNED\020\001\022\024\n\020SLICESTATUS_INIT" + - "\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLICESTATU" + - "S_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIOLATED\020\005" + - "*]\n\020ConfigActionEnum\022\032\n\026CONFIGACTION_UND" + - "EFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n\023CONFIG" + - "ACTION_DELETE\020\002*\203\002\n\022IsolationLevelEnum\022\020" + - "\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOLATION\020\001" + - "\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCESS_ISOLA" + - "TION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATION\020\004\022\036\n" + - "\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032VIRTUAL" + - "_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_FUNCTIO" + - "NS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATION\020\0102\357\022" + - "\n\016ContextService\022:\n\016ListContextIds\022\016.con" + - "text.Empty\032\026.context.ContextIdList\"\000\0226\n\014" + - "ListContexts\022\016.context.Empty\032\024.context.C" + - "ontextList\"\000\0224\n\nGetContext\022\022.context.Con" + - "textId\032\020.context.Context\"\000\0224\n\nSetContext" + - "\022\020.context.Context\032\022.context.ContextId\"\000" + - "\0225\n\rRemoveContext\022\022.context.ContextId\032\016." + - "context.Empty\"\000\022=\n\020GetContextEvents\022\016.co" + - "ntext.Empty\032\025.context.ContextEvent\"\0000\001\022@" + - "\n\017ListTopologyIds\022\022.context.ContextId\032\027." 
+ - "context.TopologyIdList\"\000\022=\n\016ListTopologi" + - "es\022\022.context.ContextId\032\025.context.Topolog" + - "yList\"\000\0227\n\013GetTopology\022\023.context.Topolog" + - "yId\032\021.context.Topology\"\000\0227\n\013SetTopology\022" + - "\021.context.Topology\032\023.context.TopologyId\"" + - "\000\0227\n\016RemoveTopology\022\023.context.TopologyId" + - "\032\016.context.Empty\"\000\022?\n\021GetTopologyEvents\022" + - "\016.context.Empty\032\026.context.TopologyEvent\"" + - "\0000\001\0228\n\rListDeviceIds\022\016.context.Empty\032\025.c" + - "ontext.DeviceIdList\"\000\0224\n\013ListDevices\022\016.c" + - "ontext.Empty\032\023.context.DeviceList\"\000\0221\n\tG" + - "etDevice\022\021.context.DeviceId\032\017.context.De" + - "vice\"\000\0221\n\tSetDevice\022\017.context.Device\032\021.c" + - "ontext.DeviceId\"\000\0223\n\014RemoveDevice\022\021.cont" + - "ext.DeviceId\032\016.context.Empty\"\000\022;\n\017GetDev" + - "iceEvents\022\016.context.Empty\032\024.context.Devi" + - "ceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.context.Emp" + - "ty\032\023.context.LinkIdList\"\000\0220\n\tListLinks\022\016" + - ".context.Empty\032\021.context.LinkList\"\000\022+\n\007G" + - "etLink\022\017.context.LinkId\032\r.context.Link\"\000" + - "\022+\n\007SetLink\022\r.context.Link\032\017.context.Lin" + - "kId\"\000\022/\n\nRemoveLink\022\017.context.LinkId\032\016.c" + - "ontext.Empty\"\000\0227\n\rGetLinkEvents\022\016.contex" + - "t.Empty\032\022.context.LinkEvent\"\0000\001\022>\n\016ListS" + - "erviceIds\022\022.context.ContextId\032\026.context." + - "ServiceIdList\"\000\022:\n\014ListServices\022\022.contex" + - "t.ContextId\032\024.context.ServiceList\"\000\0224\n\nG" + - "etService\022\022.context.ServiceId\032\020.context." + - "Service\"\000\0224\n\nSetService\022\020.context.Servic" + - "e\032\022.context.ServiceId\"\000\0225\n\rRemoveService" + - "\022\022.context.ServiceId\032\016.context.Empty\"\000\022=" + - "\n\020GetServiceEvents\022\016.context.Empty\032\025.con" + - "text.ServiceEvent\"\0000\001\022:\n\014ListSliceIds\022\022." + - "context.ContextId\032\024.context.SliceIdList\"" + - "\000\0226\n\nListSlices\022\022.context.ContextId\032\022.co" + - "ntext.SliceList\"\000\022.\n\010GetSlice\022\020.context." + - "SliceId\032\016.context.Slice\"\000\022.\n\010SetSlice\022\016." + - "context.Slice\032\020.context.SliceId\"\000\0221\n\013Rem" + - "oveSlice\022\020.context.SliceId\032\016.context.Emp" + - "ty\"\000\0229\n\016GetSliceEvents\022\016.context.Empty\032\023" + - ".context.SliceEvent\"\0000\001\022D\n\021ListConnectio" + - "nIds\022\022.context.ServiceId\032\031.context.Conne" + - "ctionIdList\"\000\022@\n\017ListConnections\022\022.conte" + - "xt.ServiceId\032\027.context.ConnectionList\"\000\022" + - "=\n\rGetConnection\022\025.context.ConnectionId\032" + - "\023.context.Connection\"\000\022=\n\rSetConnection\022" + - "\023.context.Connection\032\025.context.Connectio" + - "nId\"\000\022;\n\020RemoveConnection\022\025.context.Conn" + - "ectionId\032\016.context.Empty\"\000\022C\n\023GetConnect" + - "ionEvents\022\016.context.Empty\032\030.context.Conn" + - "ectionEvent\"\0000\001b\006proto3" + "Device\"\200\001\n\013DeviceEvent\022\035\n\005event\030\001 \001(\0132\016." 
+ + "context.Event\022$\n\tdevice_id\030\002 \001(\0132\021.conte" + + "xt.DeviceId\022,\n\rdevice_config\030\003 \001(\0132\025.con" + + "text.DeviceConfig\"*\n\006LinkId\022 \n\tlink_uuid" + + "\030\001 \001(\0132\r.context.Uuid\"X\n\004Link\022 \n\007link_id" + + "\030\001 \001(\0132\017.context.LinkId\022.\n\021link_endpoint" + + "_ids\030\002 \003(\0132\023.context.EndPointId\"/\n\nLinkI" + + "dList\022!\n\010link_ids\030\001 \003(\0132\017.context.LinkId" + + "\"(\n\010LinkList\022\034\n\005links\030\001 \003(\0132\r.context.Li" + + "nk\"L\n\tLinkEvent\022\035\n\005event\030\001 \001(\0132\016.context" + + ".Event\022 \n\007link_id\030\002 \001(\0132\017.context.LinkId" + + "\"X\n\tServiceId\022&\n\ncontext_id\030\001 \001(\0132\022.cont" + + "ext.ContextId\022#\n\014service_uuid\030\002 \001(\0132\r.co" + + "ntext.Uuid\"\315\002\n\007Service\022&\n\nservice_id\030\001 \001" + + "(\0132\022.context.ServiceId\022.\n\014service_type\030\002" + + " \001(\0162\030.context.ServiceTypeEnum\0221\n\024servic" + + "e_endpoint_ids\030\003 \003(\0132\023.context.EndPointI" + + "d\0220\n\023service_constraints\030\004 \003(\0132\023.context" + + ".Constraint\022.\n\016service_status\030\005 \001(\0132\026.co" + + "ntext.ServiceStatus\022.\n\016service_config\030\006 " + + "\001(\0132\026.context.ServiceConfig\022%\n\ttimestamp" + + "\030\007 \001(\0132\022.context.Timestamp\"C\n\rServiceSta" + + "tus\0222\n\016service_status\030\001 \001(\0162\032.context.Se" + + "rviceStatusEnum\":\n\rServiceConfig\022)\n\014conf" + + "ig_rules\030\001 \003(\0132\023.context.ConfigRule\"8\n\rS" + + "erviceIdList\022\'\n\013service_ids\030\001 \003(\0132\022.cont" + + "ext.ServiceId\"1\n\013ServiceList\022\"\n\010services" + + "\030\001 \003(\0132\020.context.Service\"U\n\014ServiceEvent" + + "\022\035\n\005event\030\001 \001(\0132\016.context.Event\022&\n\nservi" + + "ce_id\030\002 \001(\0132\022.context.ServiceId\"T\n\007Slice" + + "Id\022&\n\ncontext_id\030\001 \001(\0132\022.context.Context" + + "Id\022!\n\nslice_uuid\030\002 \001(\0132\r.context.Uuid\"\222\003" + + "\n\005Slice\022\"\n\010slice_id\030\001 \001(\0132\020.context.Slic" + + "eId\022/\n\022slice_endpoint_ids\030\002 \003(\0132\023.contex" + + "t.EndPointId\022.\n\021slice_constraints\030\003 \003(\0132" + + "\023.context.Constraint\022-\n\021slice_service_id" + + "s\030\004 \003(\0132\022.context.ServiceId\022,\n\022slice_sub" + + "slice_ids\030\005 \003(\0132\020.context.SliceId\022*\n\014sli" + + "ce_status\030\006 \001(\0132\024.context.SliceStatus\022*\n" + + "\014slice_config\030\007 \001(\0132\024.context.SliceConfi" + + "g\022(\n\013slice_owner\030\010 \001(\0132\023.context.SliceOw" + + "ner\022%\n\ttimestamp\030\t \001(\0132\022.context.Timesta" + + "mp\"E\n\nSliceOwner\022!\n\nowner_uuid\030\001 \001(\0132\r.c" + + "ontext.Uuid\022\024\n\014owner_string\030\002 \001(\t\"=\n\013Sli" + + "ceStatus\022.\n\014slice_status\030\001 \001(\0162\030.context" + + ".SliceStatusEnum\"8\n\013SliceConfig\022)\n\014confi" + + "g_rules\030\001 \003(\0132\023.context.ConfigRule\"2\n\013Sl" + + "iceIdList\022#\n\tslice_ids\030\001 \003(\0132\020.context.S" + + "liceId\"+\n\tSliceList\022\036\n\006slices\030\001 \003(\0132\016.co" + + "ntext.Slice\"O\n\nSliceEvent\022\035\n\005event\030\001 \001(\013" + + "2\016.context.Event\022\"\n\010slice_id\030\002 \001(\0132\020.con" + + "text.SliceId\"6\n\014ConnectionId\022&\n\017connecti" + + "on_uuid\030\001 \001(\0132\r.context.Uuid\"2\n\025Connecti" + + "onSettings_L0\022\031\n\021lsp_symbolic_name\030\001 
\001(\t" + + "\"\236\001\n\025ConnectionSettings_L2\022\027\n\017src_mac_ad" + + "dress\030\001 \001(\t\022\027\n\017dst_mac_address\030\002 \001(\t\022\022\n\n" + + "ether_type\030\003 \001(\r\022\017\n\007vlan_id\030\004 \001(\r\022\022\n\nmpl" + + "s_label\030\005 \001(\r\022\032\n\022mpls_traffic_class\030\006 \001(" + + "\r\"t\n\025ConnectionSettings_L3\022\026\n\016src_ip_add" + + "ress\030\001 \001(\t\022\026\n\016dst_ip_address\030\002 \001(\t\022\014\n\004ds" + + "cp\030\003 \001(\r\022\020\n\010protocol\030\004 \001(\r\022\013\n\003ttl\030\005 \001(\r\"" + + "[\n\025ConnectionSettings_L4\022\020\n\010src_port\030\001 \001" + + "(\r\022\020\n\010dst_port\030\002 \001(\r\022\021\n\ttcp_flags\030\003 \001(\r\022" + + "\013\n\003ttl\030\004 \001(\r\"\304\001\n\022ConnectionSettings\022*\n\002l" + + "0\030\001 \001(\0132\036.context.ConnectionSettings_L0\022" + + "*\n\002l2\030\002 \001(\0132\036.context.ConnectionSettings" + + "_L2\022*\n\002l3\030\003 \001(\0132\036.context.ConnectionSett" + + "ings_L3\022*\n\002l4\030\004 \001(\0132\036.context.Connection" + + "Settings_L4\"\363\001\n\nConnection\022,\n\rconnection" + + "_id\030\001 \001(\0132\025.context.ConnectionId\022&\n\nserv" + + "ice_id\030\002 \001(\0132\022.context.ServiceId\0223\n\026path" + + "_hops_endpoint_ids\030\003 \003(\0132\023.context.EndPo" + + "intId\022+\n\017sub_service_ids\030\004 \003(\0132\022.context" + + ".ServiceId\022-\n\010settings\030\005 \001(\0132\033.context.C" + + "onnectionSettings\"A\n\020ConnectionIdList\022-\n" + + "\016connection_ids\030\001 \003(\0132\025.context.Connecti" + + "onId\":\n\016ConnectionList\022(\n\013connections\030\001 " + + "\003(\0132\023.context.Connection\"^\n\017ConnectionEv" + + "ent\022\035\n\005event\030\001 \001(\0132\016.context.Event\022,\n\rco" + + "nnection_id\030\002 \001(\0132\025.context.ConnectionId" + + "\"\202\001\n\nEndPointId\022(\n\013topology_id\030\001 \001(\0132\023.c" + + "ontext.TopologyId\022$\n\tdevice_id\030\002 \001(\0132\021.c" + + "ontext.DeviceId\022$\n\rendpoint_uuid\030\003 \001(\0132\r" + + ".context.Uuid\"\264\001\n\010EndPoint\022(\n\013endpoint_i" + + "d\030\001 \001(\0132\023.context.EndPointId\022\025\n\rendpoint" + + "_type\030\002 \001(\t\0229\n\020kpi_sample_types\030\003 \003(\0162\037." + + "kpi_sample_types.KpiSampleType\022,\n\021endpoi" + + "nt_location\030\004 \001(\0132\021.context.Location\"A\n\021" + + "ConfigRule_Custom\022\024\n\014resource_key\030\001 \001(\t\022" + + "\026\n\016resource_value\030\002 \001(\t\"]\n\016ConfigRule_AC" + + "L\022(\n\013endpoint_id\030\001 \001(\0132\023.context.EndPoin" + + "tId\022!\n\010rule_set\030\002 \001(\0132\017.acl.AclRuleSet\"\234" + + "\001\n\nConfigRule\022)\n\006action\030\001 \001(\0162\031.context." 
+ + "ConfigActionEnum\022,\n\006custom\030\002 \001(\0132\032.conte" + + "xt.ConfigRule_CustomH\000\022&\n\003acl\030\003 \001(\0132\027.co" + + "ntext.ConfigRule_ACLH\000B\r\n\013config_rule\"F\n" + + "\021Constraint_Custom\022\027\n\017constraint_type\030\001 " + + "\001(\t\022\030\n\020constraint_value\030\002 \001(\t\"E\n\023Constra" + + "int_Schedule\022\027\n\017start_timestamp\030\001 \001(\002\022\025\n" + + "\rduration_days\030\002 \001(\002\"3\n\014GPS_Position\022\020\n\010" + + "latitude\030\001 \001(\002\022\021\n\tlongitude\030\002 \001(\002\"W\n\010Loc" + + "ation\022\020\n\006region\030\001 \001(\tH\000\022-\n\014gps_position\030" + + "\002 \001(\0132\025.context.GPS_PositionH\000B\n\n\010locati" + + "on\"l\n\033Constraint_EndPointLocation\022(\n\013end" + + "point_id\030\001 \001(\0132\023.context.EndPointId\022#\n\010l" + + "ocation\030\002 \001(\0132\021.context.Location\"Y\n\033Cons" + + "traint_EndPointPriority\022(\n\013endpoint_id\030\001" + + " \001(\0132\023.context.EndPointId\022\020\n\010priority\030\002 " + + "\001(\r\"0\n\026Constraint_SLA_Latency\022\026\n\016e2e_lat" + + "ency_ms\030\001 \001(\002\"0\n\027Constraint_SLA_Capacity" + + "\022\025\n\rcapacity_gbps\030\001 \001(\002\"M\n\033Constraint_SL" + + "A_Availability\022\032\n\022num_disjoint_paths\030\001 \001" + + "(\r\022\022\n\nall_active\030\002 \001(\010\"V\n\036Constraint_SLA" + + "_Isolation_level\0224\n\017isolation_level\030\001 \003(" + + "\0162\033.context.IsolationLevelEnum\"\366\003\n\nConst" + + "raint\022,\n\006custom\030\001 \001(\0132\032.context.Constrai" + + "nt_CustomH\000\0220\n\010schedule\030\002 \001(\0132\034.context." + + "Constraint_ScheduleH\000\022A\n\021endpoint_locati" + + "on\030\003 \001(\0132$.context.Constraint_EndPointLo" + + "cationH\000\022A\n\021endpoint_priority\030\004 \001(\0132$.co" + + "ntext.Constraint_EndPointPriorityH\000\0228\n\014s" + + "la_capacity\030\005 \001(\0132 .context.Constraint_S" + + "LA_CapacityH\000\0226\n\013sla_latency\030\006 \001(\0132\037.con" + + "text.Constraint_SLA_LatencyH\000\022@\n\020sla_ava" + + "ilability\030\007 \001(\0132$.context.Constraint_SLA" + + "_AvailabilityH\000\022@\n\rsla_isolation\030\010 \001(\0132\'" + + ".context.Constraint_SLA_Isolation_levelH" + + "\000B\014\n\nconstraint\"^\n\022TeraFlowController\022&\n" + + "\ncontext_id\030\001 \001(\0132\022.context.ContextId\022\022\n" + + "\nip_address\030\002 \001(\t\022\014\n\004port\030\003 \001(\r\"U\n\024Authe" + + "nticationResult\022&\n\ncontext_id\030\001 \001(\0132\022.co" + + "ntext.ContextId\022\025\n\rauthenticated\030\002 \001(\010*j" + + "\n\rEventTypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000" + + "\022\024\n\020EVENTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDA" + + "TE\020\002\022\024\n\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDri" + + "verEnum\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027D" + + "EVICEDRIVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER" + + "_TRANSPORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n" + + "\"DEVICEDRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n" + + "\027DEVICEDRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOpe" + + "rationalStatusEnum\022%\n!DEVICEOPERATIONALS" + + "TATUS_UNDEFINED\020\000\022$\n DEVICEOPERATIONALST" + + "ATUS_DISABLED\020\001\022#\n\037DEVICEOPERATIONALSTAT" + + "US_ENABLED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERV" + + "ICETYPE_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022" + + "\024\n\020SERVICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAP" + + 
"I_CONNECTIVITY_SERVICE\020\003*\250\001\n\021ServiceStat" + + "usEnum\022\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025S" + + "ERVICESTATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_" + + "ACTIVE\020\002\022!\n\035SERVICESTATUS_PENDING_REMOVA" + + "L\020\003\022\036\n\032SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017" + + "SliceStatusEnum\022\031\n\025SLICESTATUS_UNDEFINED" + + "\020\000\022\027\n\023SLICESTATUS_PLANNED\020\001\022\024\n\020SLICESTAT" + + "US_INIT\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLI" + + "CESTATUS_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIO" + + "LATED\020\005*]\n\020ConfigActionEnum\022\032\n\026CONFIGACT" + + "ION_UNDEFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n" + + "\023CONFIGACTION_DELETE\020\002*\203\002\n\022IsolationLeve" + + "lEnum\022\020\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOL" + + "ATION\020\001\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCES" + + "S_ISOLATION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATI" + + "ON\020\004\022\036\n\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032" + + "VIRTUAL_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_" + + "FUNCTIONS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATI" + + "ON\020\0102\331\023\n\016ContextService\022:\n\016ListContextId" + + "s\022\016.context.Empty\032\026.context.ContextIdLis" + + "t\"\000\0226\n\014ListContexts\022\016.context.Empty\032\024.co" + + "ntext.ContextList\"\000\0224\n\nGetContext\022\022.cont" + + "ext.ContextId\032\020.context.Context\"\000\0224\n\nSet" + + "Context\022\020.context.Context\032\022.context.Cont" + + "extId\"\000\0225\n\rRemoveContext\022\022.context.Conte" + + "xtId\032\016.context.Empty\"\000\022=\n\020GetContextEven" + + "ts\022\016.context.Empty\032\025.context.ContextEven" + + "t\"\0000\001\022@\n\017ListTopologyIds\022\022.context.Conte" + + "xtId\032\027.context.TopologyIdList\"\000\022=\n\016ListT" + + "opologies\022\022.context.ContextId\032\025.context." + + "TopologyList\"\000\0227\n\013GetTopology\022\023.context." 
+ + "TopologyId\032\021.context.Topology\"\000\0227\n\013SetTo" + + "pology\022\021.context.Topology\032\023.context.Topo" + + "logyId\"\000\0227\n\016RemoveTopology\022\023.context.Top" + + "ologyId\032\016.context.Empty\"\000\022?\n\021GetTopology" + + "Events\022\016.context.Empty\032\026.context.Topolog" + + "yEvent\"\0000\001\0228\n\rListDeviceIds\022\016.context.Em" + + "pty\032\025.context.DeviceIdList\"\000\0224\n\013ListDevi" + + "ces\022\016.context.Empty\032\023.context.DeviceList" + + "\"\000\0221\n\tGetDevice\022\021.context.DeviceId\032\017.con" + + "text.Device\"\000\0221\n\tSetDevice\022\017.context.Dev" + + "ice\032\021.context.DeviceId\"\000\0223\n\014RemoveDevice" + + "\022\021.context.DeviceId\032\016.context.Empty\"\000\022;\n" + + "\017GetDeviceEvents\022\016.context.Empty\032\024.conte" + + "xt.DeviceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.cont" + + "ext.Empty\032\023.context.LinkIdList\"\000\0220\n\tList" + + "Links\022\016.context.Empty\032\021.context.LinkList" + + "\"\000\022+\n\007GetLink\022\017.context.LinkId\032\r.context" + + ".Link\"\000\022+\n\007SetLink\022\r.context.Link\032\017.cont" + + "ext.LinkId\"\000\022/\n\nRemoveLink\022\017.context.Lin" + + "kId\032\016.context.Empty\"\000\0227\n\rGetLinkEvents\022\016" + + ".context.Empty\032\022.context.LinkEvent\"\0000\001\022>" + + "\n\016ListServiceIds\022\022.context.ContextId\032\026.c" + + "ontext.ServiceIdList\"\000\022:\n\014ListServices\022\022" + + ".context.ContextId\032\024.context.ServiceList" + + "\"\000\0224\n\nGetService\022\022.context.ServiceId\032\020.c" + + "ontext.Service\"\000\0224\n\nSetService\022\020.context" + + ".Service\032\022.context.ServiceId\"\000\0226\n\014UnsetS" + + "ervice\022\020.context.Service\032\022.context.Servi" + + "ceId\"\000\0225\n\rRemoveService\022\022.context.Servic" + + "eId\032\016.context.Empty\"\000\022=\n\020GetServiceEvent" + + "s\022\016.context.Empty\032\025.context.ServiceEvent" + + "\"\0000\001\022:\n\014ListSliceIds\022\022.context.ContextId" + + "\032\024.context.SliceIdList\"\000\0226\n\nListSlices\022\022" + + ".context.ContextId\032\022.context.SliceList\"\000" + + "\022.\n\010GetSlice\022\020.context.SliceId\032\016.context" + + ".Slice\"\000\022.\n\010SetSlice\022\016.context.Slice\032\020.c" + + "ontext.SliceId\"\000\0220\n\nUnsetSlice\022\016.context" + + ".Slice\032\020.context.SliceId\"\000\0221\n\013RemoveSlic" + + "e\022\020.context.SliceId\032\016.context.Empty\"\000\0229\n" + + "\016GetSliceEvents\022\016.context.Empty\032\023.contex" + + "t.SliceEvent\"\0000\001\022D\n\021ListConnectionIds\022\022." 
+ + "context.ServiceId\032\031.context.ConnectionId" + + "List\"\000\022@\n\017ListConnections\022\022.context.Serv" + + "iceId\032\027.context.ConnectionList\"\000\022=\n\rGetC" + + "onnection\022\025.context.ConnectionId\032\023.conte" + + "xt.Connection\"\000\022=\n\rSetConnection\022\023.conte" + + "xt.Connection\032\025.context.ConnectionId\"\000\022;" + + "\n\020RemoveConnection\022\025.context.ConnectionI" + + "d\032\016.context.Empty\"\000\022C\n\023GetConnectionEven" + + "ts\022\016.context.Empty\032\030.context.ConnectionE" + + "vent\"\0000\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -62331,7 +62538,7 @@ public final class ContextOuterClass { internal_static_context_DeviceEvent_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_context_DeviceEvent_descriptor, - new java.lang.String[] { "Event", "DeviceId", }); + new java.lang.String[] { "Event", "DeviceId", "DeviceConfig", }); internal_static_context_LinkId_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_context_LinkId_fieldAccessorTable = new diff --git a/src/automation/target/generated-sources/grpc/context/ContextService.java b/src/automation/target/generated-sources/grpc/context/ContextService.java index d54c56057ca53e40071490d3b9aa313a13a77665..814ea98b65370f8fd3ffd752c77bec04997a5dd6 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextService.java +++ b/src/automation/target/generated-sources/grpc/context/ContextService.java @@ -56,6 +56,8 @@ public interface ContextService extends MutinyService { io.smallrye.mutiny.Uni setService(context.ContextOuterClass.Service request); + io.smallrye.mutiny.Uni unsetService(context.ContextOuterClass.Service request); + io.smallrye.mutiny.Uni removeService(context.ContextOuterClass.ServiceId request); io.smallrye.mutiny.Uni listSliceIds(context.ContextOuterClass.ContextId request); @@ -66,6 +68,8 @@ public interface ContextService extends MutinyService { io.smallrye.mutiny.Uni setSlice(context.ContextOuterClass.Slice request); + io.smallrye.mutiny.Uni unsetSlice(context.ContextOuterClass.Slice request); + io.smallrye.mutiny.Uni removeSlice(context.ContextOuterClass.SliceId request); io.smallrye.mutiny.Uni listConnectionIds(context.ContextOuterClass.ServiceId request); diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java b/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java index f552294b8e6d645af41cc30632ae0432504bbc67..2b0099f106265e34d1f60bb3e0ecdc35f81895ee 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java +++ b/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java @@ -208,6 +208,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI } } @Override + public io.smallrye.mutiny.Uni unsetService(context.ContextOuterClass.Service request) { + try { + return delegate.unsetService(request); + } catch (UnsupportedOperationException e) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + } + @Override public io.smallrye.mutiny.Uni removeService(context.ContextOuterClass.ServiceId request) { try { return delegate.removeService(request); @@ -248,6 +256,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI } } @Override + public io.smallrye.mutiny.Uni unsetSlice(context.ContextOuterClass.Slice request) { + 
try { + return delegate.unsetSlice(request); + } catch (UnsupportedOperationException e) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + } + @Override public io.smallrye.mutiny.Uni removeSlice(context.ContextOuterClass.SliceId request) { try { return delegate.removeSlice(request); diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java b/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java index c6493bd4d381967238e5eb87dd717f679d028526..c518a0b4622522728e0eb22fdbeb80442b10f7ef 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java +++ b/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java @@ -117,6 +117,10 @@ public class ContextServiceClient implements ContextService, MutinyClient unsetService(context.ContextOuterClass.Service request) { + return stub.unsetService(request); + } + @Override public io.smallrye.mutiny.Uni removeService(context.ContextOuterClass.ServiceId request) { return stub.removeService(request); } @@ -137,6 +141,10 @@ public class ContextServiceClient implements ContextService, MutinyClient unsetSlice(context.ContextOuterClass.Slice request) { + return stub.unsetSlice(request); + } + @Override public io.smallrye.mutiny.Uni removeSlice(context.ContextOuterClass.SliceId request) { return stub.removeSlice(request); } diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java b/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java index be720c127439e50f68c2518332f85f750d6579ee..f59378086c84d0776cc25fb7aa9640403b072c0f 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java +++ b/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java @@ -882,6 +882,37 @@ public final class ContextServiceGrpc { return getSetServiceMethod; } + private static volatile io.grpc.MethodDescriptor getUnsetServiceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UnsetService", + requestType = context.ContextOuterClass.Service.class, + responseType = context.ContextOuterClass.ServiceId.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getUnsetServiceMethod() { + io.grpc.MethodDescriptor getUnsetServiceMethod; + if ((getUnsetServiceMethod = ContextServiceGrpc.getUnsetServiceMethod) == null) { + synchronized (ContextServiceGrpc.class) { + if ((getUnsetServiceMethod = ContextServiceGrpc.getUnsetServiceMethod) == null) { + ContextServiceGrpc.getUnsetServiceMethod = getUnsetServiceMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UnsetService")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.Service.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.ServiceId.getDefaultInstance())) + .setSchemaDescriptor(new ContextServiceMethodDescriptorSupplier("UnsetService")) + .build(); + } + } + } + return getUnsetServiceMethod; + } + private static volatile io.grpc.MethodDescriptor getRemoveServiceMethod; @@ -1068,6 +1099,37 @@ public final class ContextServiceGrpc { return getSetSliceMethod; } + private static volatile io.grpc.MethodDescriptor getUnsetSliceMethod; + + @io.grpc.stub.annotations.RpcMethod( + 
fullMethodName = SERVICE_NAME + '/' + "UnsetSlice", + requestType = context.ContextOuterClass.Slice.class, + responseType = context.ContextOuterClass.SliceId.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getUnsetSliceMethod() { + io.grpc.MethodDescriptor getUnsetSliceMethod; + if ((getUnsetSliceMethod = ContextServiceGrpc.getUnsetSliceMethod) == null) { + synchronized (ContextServiceGrpc.class) { + if ((getUnsetSliceMethod = ContextServiceGrpc.getUnsetSliceMethod) == null) { + ContextServiceGrpc.getUnsetSliceMethod = getUnsetSliceMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UnsetSlice")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.Slice.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.SliceId.getDefaultInstance())) + .setSchemaDescriptor(new ContextServiceMethodDescriptorSupplier("UnsetSlice")) + .build(); + } + } + } + return getUnsetSliceMethod; + } + private static volatile io.grpc.MethodDescriptor getRemoveSliceMethod; @@ -1560,6 +1622,13 @@ public final class ContextServiceGrpc { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getSetServiceMethod(), responseObserver); } + /** + */ + public void unsetService(context.ContextOuterClass.Service request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUnsetServiceMethod(), responseObserver); + } + /** */ public void removeService(context.ContextOuterClass.ServiceId request, @@ -1602,6 +1671,13 @@ public final class ContextServiceGrpc { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getSetSliceMethod(), responseObserver); } + /** + */ + public void unsetSlice(context.ContextOuterClass.Slice request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUnsetSliceMethod(), responseObserver); + } + /** */ public void removeSlice(context.ContextOuterClass.SliceId request, @@ -1856,6 +1932,13 @@ public final class ContextServiceGrpc { context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId>( this, METHODID_SET_SERVICE))) + .addMethod( + getUnsetServiceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Service, + context.ContextOuterClass.ServiceId>( + this, METHODID_UNSET_SERVICE))) .addMethod( getRemoveServiceMethod(), io.grpc.stub.ServerCalls.asyncUnaryCall( @@ -1898,6 +1981,13 @@ public final class ContextServiceGrpc { context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId>( this, METHODID_SET_SLICE))) + .addMethod( + getUnsetSliceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Slice, + context.ContextOuterClass.SliceId>( + this, METHODID_UNSET_SLICE))) .addMethod( getRemoveSliceMethod(), io.grpc.stub.ServerCalls.asyncUnaryCall( @@ -2196,6 +2286,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetServiceMethod(), getCallOptions()), request, responseObserver); } + /** + */ + public void unsetService(context.ContextOuterClass.Service request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUnsetServiceMethod(), getCallOptions()), request, responseObserver); + } + /** */ public 
void removeService(context.ContextOuterClass.ServiceId request, @@ -2244,6 +2342,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetSliceMethod(), getCallOptions()), request, responseObserver); } + /** + */ + public void unsetSlice(context.ContextOuterClass.Slice request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUnsetSliceMethod(), getCallOptions()), request, responseObserver); + } + /** */ public void removeSlice(context.ContextOuterClass.SliceId request, @@ -2523,6 +2629,13 @@ public final class ContextServiceGrpc { getChannel(), getSetServiceMethod(), getCallOptions(), request); } + /** + */ + public context.ContextOuterClass.ServiceId unsetService(context.ContextOuterClass.Service request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUnsetServiceMethod(), getCallOptions(), request); + } + /** */ public context.ContextOuterClass.Empty removeService(context.ContextOuterClass.ServiceId request) { @@ -2566,6 +2679,13 @@ public final class ContextServiceGrpc { getChannel(), getSetSliceMethod(), getCallOptions(), request); } + /** + */ + public context.ContextOuterClass.SliceId unsetSlice(context.ContextOuterClass.Slice request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUnsetSliceMethod(), getCallOptions(), request); + } + /** */ public context.ContextOuterClass.Empty removeSlice(context.ContextOuterClass.SliceId request) { @@ -2831,6 +2951,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetServiceMethod(), getCallOptions()), request); } + /** + */ + public com.google.common.util.concurrent.ListenableFuture unsetService( + context.ContextOuterClass.Service request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUnsetServiceMethod(), getCallOptions()), request); + } + /** */ public com.google.common.util.concurrent.ListenableFuture removeService( @@ -2871,6 +2999,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetSliceMethod(), getCallOptions()), request); } + /** + */ + public com.google.common.util.concurrent.ListenableFuture unsetSlice( + context.ContextOuterClass.Slice request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUnsetSliceMethod(), getCallOptions()), request); + } + /** */ public com.google.common.util.concurrent.ListenableFuture removeSlice( @@ -2948,20 +3084,22 @@ public final class ContextServiceGrpc { private static final int METHODID_LIST_SERVICES = 25; private static final int METHODID_GET_SERVICE = 26; private static final int METHODID_SET_SERVICE = 27; - private static final int METHODID_REMOVE_SERVICE = 28; - private static final int METHODID_GET_SERVICE_EVENTS = 29; - private static final int METHODID_LIST_SLICE_IDS = 30; - private static final int METHODID_LIST_SLICES = 31; - private static final int METHODID_GET_SLICE = 32; - private static final int METHODID_SET_SLICE = 33; - private static final int METHODID_REMOVE_SLICE = 34; - private static final int METHODID_GET_SLICE_EVENTS = 35; - private static final int METHODID_LIST_CONNECTION_IDS = 36; - private static final int METHODID_LIST_CONNECTIONS = 37; - private static final int METHODID_GET_CONNECTION = 38; - private static final int METHODID_SET_CONNECTION = 39; - private static final int METHODID_REMOVE_CONNECTION = 40; - private static final int METHODID_GET_CONNECTION_EVENTS = 41; + private static final int METHODID_UNSET_SERVICE = 28; + 
private static final int METHODID_REMOVE_SERVICE = 29; + private static final int METHODID_GET_SERVICE_EVENTS = 30; + private static final int METHODID_LIST_SLICE_IDS = 31; + private static final int METHODID_LIST_SLICES = 32; + private static final int METHODID_GET_SLICE = 33; + private static final int METHODID_SET_SLICE = 34; + private static final int METHODID_UNSET_SLICE = 35; + private static final int METHODID_REMOVE_SLICE = 36; + private static final int METHODID_GET_SLICE_EVENTS = 37; + private static final int METHODID_LIST_CONNECTION_IDS = 38; + private static final int METHODID_LIST_CONNECTIONS = 39; + private static final int METHODID_GET_CONNECTION = 40; + private static final int METHODID_SET_CONNECTION = 41; + private static final int METHODID_REMOVE_CONNECTION = 42; + private static final int METHODID_GET_CONNECTION_EVENTS = 43; private static final class MethodHandlers implements io.grpc.stub.ServerCalls.UnaryMethod, @@ -3092,6 +3230,10 @@ public final class ContextServiceGrpc { serviceImpl.setService((context.ContextOuterClass.Service) request, (io.grpc.stub.StreamObserver) responseObserver); break; + case METHODID_UNSET_SERVICE: + serviceImpl.unsetService((context.ContextOuterClass.Service) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; case METHODID_REMOVE_SERVICE: serviceImpl.removeService((context.ContextOuterClass.ServiceId) request, (io.grpc.stub.StreamObserver) responseObserver); @@ -3116,6 +3258,10 @@ public final class ContextServiceGrpc { serviceImpl.setSlice((context.ContextOuterClass.Slice) request, (io.grpc.stub.StreamObserver) responseObserver); break; + case METHODID_UNSET_SLICE: + serviceImpl.unsetSlice((context.ContextOuterClass.Slice) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; case METHODID_REMOVE_SLICE: serviceImpl.removeSlice((context.ContextOuterClass.SliceId) request, (io.grpc.stub.StreamObserver) responseObserver); @@ -3237,12 +3383,14 @@ public final class ContextServiceGrpc { .addMethod(getListServicesMethod()) .addMethod(getGetServiceMethod()) .addMethod(getSetServiceMethod()) + .addMethod(getUnsetServiceMethod()) .addMethod(getRemoveServiceMethod()) .addMethod(getGetServiceEventsMethod()) .addMethod(getListSliceIdsMethod()) .addMethod(getListSlicesMethod()) .addMethod(getGetSliceMethod()) .addMethod(getSetSliceMethod()) + .addMethod(getUnsetSliceMethod()) .addMethod(getRemoveSliceMethod()) .addMethod(getGetSliceEventsMethod()) .addMethod(getListConnectionIdsMethod()) diff --git a/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java b/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java index 9f71b53786e40922546dc59cfd4328040a40bd7c..f7d2cb94e339366b54355c7e11b3ee72fa1e415c 100644 --- a/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java +++ b/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java @@ -156,6 +156,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public io.smallrye.mutiny.Uni unsetService(context.ContextOuterClass.Service request) { + return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::unsetService); + } + + public io.smallrye.mutiny.Uni removeService(context.ContextOuterClass.ServiceId request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::removeService); } @@ -181,6 +186,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public 
io.smallrye.mutiny.Uni unsetSlice(context.ContextOuterClass.Slice request) { + return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::unsetSlice); + } + + public io.smallrye.mutiny.Uni removeSlice(context.ContextOuterClass.SliceId request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::removeSlice); } @@ -383,6 +393,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public io.smallrye.mutiny.Uni unsetService(context.ContextOuterClass.Service request) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + + public io.smallrye.mutiny.Uni removeService(context.ContextOuterClass.ServiceId request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -408,6 +423,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public io.smallrye.mutiny.Uni unsetSlice(context.ContextOuterClass.Slice request) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + + public io.smallrye.mutiny.Uni removeSlice(context.ContextOuterClass.SliceId request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -670,6 +690,13 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId>( this, METHODID_SET_SERVICE, compression))) + .addMethod( + context.ContextServiceGrpc.getUnsetServiceMethod(), + asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Service, + context.ContextOuterClass.ServiceId>( + this, METHODID_UNSET_SERVICE, compression))) .addMethod( context.ContextServiceGrpc.getRemoveServiceMethod(), asyncUnaryCall( @@ -712,6 +739,13 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId>( this, METHODID_SET_SLICE, compression))) + .addMethod( + context.ContextServiceGrpc.getUnsetSliceMethod(), + asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Slice, + context.ContextOuterClass.SliceId>( + this, METHODID_UNSET_SLICE, compression))) .addMethod( context.ContextServiceGrpc.getRemoveSliceMethod(), asyncUnaryCall( @@ -800,20 +834,22 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M private static final int METHODID_LIST_SERVICES = 25; private static final int METHODID_GET_SERVICE = 26; private static final int METHODID_SET_SERVICE = 27; - private static final int METHODID_REMOVE_SERVICE = 28; - private static final int METHODID_GET_SERVICE_EVENTS = 29; - private static final int METHODID_LIST_SLICE_IDS = 30; - private static final int METHODID_LIST_SLICES = 31; - private static final int METHODID_GET_SLICE = 32; - private static final int METHODID_SET_SLICE = 33; - private static final int METHODID_REMOVE_SLICE = 34; - private static final int METHODID_GET_SLICE_EVENTS = 35; - private static final int METHODID_LIST_CONNECTION_IDS = 36; - private static final int METHODID_LIST_CONNECTIONS = 37; - private static final int METHODID_GET_CONNECTION = 38; - private static final int METHODID_SET_CONNECTION = 39; - private static final int METHODID_REMOVE_CONNECTION = 40; - private static final int METHODID_GET_CONNECTION_EVENTS = 41; + private static final int METHODID_UNSET_SERVICE = 28; + private static final int METHODID_REMOVE_SERVICE = 29; + private static final int METHODID_GET_SERVICE_EVENTS = 30; + private static final int 
METHODID_LIST_SLICE_IDS = 31; + private static final int METHODID_LIST_SLICES = 32; + private static final int METHODID_GET_SLICE = 33; + private static final int METHODID_SET_SLICE = 34; + private static final int METHODID_UNSET_SLICE = 35; + private static final int METHODID_REMOVE_SLICE = 36; + private static final int METHODID_GET_SLICE_EVENTS = 37; + private static final int METHODID_LIST_CONNECTION_IDS = 38; + private static final int METHODID_LIST_CONNECTIONS = 39; + private static final int METHODID_GET_CONNECTION = 40; + private static final int METHODID_SET_CONNECTION = 41; + private static final int METHODID_REMOVE_CONNECTION = 42; + private static final int METHODID_GET_CONNECTION_EVENTS = 43; private static final class MethodHandlers implements io.grpc.stub.ServerCalls.UnaryMethod, @@ -1002,6 +1038,12 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M compression, serviceImpl::setService); break; + case METHODID_UNSET_SERVICE: + io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Service) request, + (io.grpc.stub.StreamObserver) responseObserver, + compression, + serviceImpl::unsetService); + break; case METHODID_REMOVE_SERVICE: io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.ServiceId) request, (io.grpc.stub.StreamObserver) responseObserver, @@ -1038,6 +1080,12 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M compression, serviceImpl::setSlice); break; + case METHODID_UNSET_SLICE: + io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Slice) request, + (io.grpc.stub.StreamObserver) responseObserver, + compression, + serviceImpl::unsetSlice); + break; case METHODID_REMOVE_SLICE: io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.SliceId) request, (io.grpc.stub.StreamObserver) responseObserver, diff --git a/src/common/rpc_method_wrapper/ServiceExceptions.py b/src/common/rpc_method_wrapper/ServiceExceptions.py index f4f0a64cad79c96dc069bd37e8d2c2be5f011c53..e8d5c79acca19117fca53ec216166c01d3f0781d 100644 --- a/src/common/rpc_method_wrapper/ServiceExceptions.py +++ b/src/common/rpc_method_wrapper/ServiceExceptions.py @@ -56,3 +56,11 @@ class OperationFailedException(ServiceException): details = 'Operation({:s}) failed'.format(str(operation)) super().__init__(grpc.StatusCode.INTERNAL, details, extra_details=extra_details) + +class NotImplementedException(ServiceException): + def __init__( + self, operation : str, extra_details : Union[str, Iterable[str]] = None + ) -> None: + + details = 'Operation({:s}) not implemented'.format(str(operation)) + super().__init__(grpc.StatusCode.UNIMPLEMENTED, details, extra_details=extra_details) diff --git a/src/common/tests/EventTools.py b/src/common/tests/EventTools.py index ceff4d60e597690b29d5f1bcac894c081eb88a56..d0f82841395ea77a7c2483099458760769f8c535 100644 --- a/src/common/tests/EventTools.py +++ b/src/common/tests/EventTools.py @@ -15,7 +15,7 @@ import json, logging from typing import Dict, List, Tuple from common.proto.context_pb2 import ( - ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, TopologyEvent) + ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent) from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.EventsCollector import EventsCollector @@ -32,6 +32,7 @@ CLASSNAME_CONTEXT_EVENT = class_to_classname(ContextEvent) CLASSNAME_TOPOLOGY_EVENT = 
class_to_classname(TopologyEvent)
 CLASSNAME_DEVICE_EVENT     = class_to_classname(DeviceEvent)
 CLASSNAME_LINK_EVENT       = class_to_classname(LinkEvent)
+CLASSNAME_SLICE_EVENT      = class_to_classname(SliceEvent)
 CLASSNAME_SERVICE_EVENT    = class_to_classname(ServiceEvent)
 CLASSNAME_CONNECTION_EVENT = class_to_classname(ConnectionEvent)
@@ -40,6 +41,7 @@ EVENT_CLASS_NAME__TO__ENTITY_ID_SELECTOR = {
     CLASSNAME_TOPOLOGY_EVENT  : lambda event: event.topology_id,
     CLASSNAME_DEVICE_EVENT    : lambda event: event.device_id,
     CLASSNAME_LINK_EVENT      : lambda event: event.link_id,
+    CLASSNAME_SLICE_EVENT     : lambda event: event.slice_id,
     CLASSNAME_SERVICE_EVENT   : lambda event: event.service_id,
     CLASSNAME_CONNECTION_EVENT: lambda event: event.connection_id,
 }
diff --git a/src/common/tools/mutex_queues/MutexQueues.py b/src/common/tools/mutex_queues/MutexQueues.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3ab760f281c73ae2f308044d67b2d2b81aef142
--- /dev/null
+++ b/src/common/tools/mutex_queues/MutexQueues.py
@@ -0,0 +1,78 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MutexQueues:
+# ------------
+# This class makes it possible to schedule and serialize operations issued
+# concurrently over a number of resources. For instance, when multiple components
+# want to configure devices through the Device component, the configuration
+# operations have to be serialized to prevent data corruption, race conditions, etc.
+# Usage Example:
+#    class Servicer():
+#        def __init__(self):
+#            # init other stuff
+#            self.drivers = dict()
+#            self.mutex_queues = MutexQueues()
+#
+#        def configure_device(self, device_uuid, settings):
+#            self.mutex_queues.wait_my_turn(device_uuid)
+#            driver = self.drivers.get(device_uuid)
+#            if driver is None:
+#                driver = Driver(device_uuid)
+#                self.drivers[device_uuid] = driver
+#            driver.configure(settings)
+#            self.mutex_queues.signal_done(device_uuid)
+
+import threading
+from queue import Queue
+from typing import Dict
+
+class MutexQueues:
+    def __init__(self) -> None:
+        # lock to protect dictionary updates
+        self.lock = threading.Lock()
+
+        # dictionary of queues of mutexes: queue_name => queue[mutex]
+        # first mutex is the running one
+        self.mutex_queues : Dict[str, Queue[threading.Event]] = dict()
+
+    def wait_my_turn(self, queue_name : str) -> None:
+        # create my mutex and enqueue it
+        mutex = threading.Event()
+        with self.lock:
+            queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
+            first_in_queue = (queue.qsize() == 0)
+            queue.put_nowait(mutex)
+
+        # if I'm the first in the queue upon addition, there are no running
+        # tasks, so return directly without waiting
+        if first_in_queue: return
+
+        # otherwise, wait for my turn in the queue
+        mutex.wait()
+
+    def signal_done(self, queue_name : str) -> None:
+        # I'm done with my work
+        with self.lock:
+            queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
+
+            # remove myself from the queue
+            queue.get_nowait()
+
+            # if there are no other tasks queued, return
+            if queue.qsize() == 0: return
+
+            # otherwise, signal the next task in the queue to start
+            next_mutex : threading.Event = queue.queue[0]
+            next_mutex.set()
diff --git a/src/common/tools/mutex_queues/__init__.py b/src/common/tools/mutex_queues/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/common/tools/mutex_queues/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
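For illustration, a minimal, self-contained sketch of how the new MutexQueues class serializes concurrent work on a per-resource basis. This is not part of the patch: the import path assumes src/common is on PYTHONPATH, and the worker and device names are made up.

    import threading, time
    from common.tools.mutex_queues.MutexQueues import MutexQueues

    mutex_queues = MutexQueues()

    def configure_device(worker_name : str, device_uuid : str) -> None:
        # block until all previously-enqueued tasks for this device have finished
        mutex_queues.wait_my_turn(device_uuid)
        try:
            print('{:s} configures {:s}'.format(worker_name, device_uuid))
            time.sleep(0.1)    # stand-in for the real, non-thread-safe operation
        finally:
            # wake up the next task queued for this device, if any
            mutex_queues.signal_done(device_uuid)

    # three workers targeting the same device run strictly one after the other;
    # workers targeting different devices would proceed in parallel
    workers = [
        threading.Thread(target=configure_device, args=('worker-{:d}'.format(i), 'R1'))
        for i in range(3)
    ]
    for worker in workers: worker.start()
    for worker in workers: worker.join()

Calling signal_done() from a finally block guards against a failed operation leaving the queue blocked forever; the class itself puts no timeout on waiting tasks.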
+ diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py index 51f75e6dbe5e430330e697da772d65703f7568c7..62f3dcbda148f1c624265ae7d76b0c17f5d36959 100644 --- a/src/common/tools/object_factory/Service.py +++ b/src/common/tools/object_factory/Service.py @@ -44,10 +44,20 @@ def json_service( def json_service_l3nm_planned( service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], - config_rules : List[Dict] = [] + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID ): return json_service( - service_uuid, ServiceTypeEnum.SERVICETYPE_L3NM, context_id=json_context_id(DEFAULT_CONTEXT_UUID), + service_uuid, ServiceTypeEnum.SERVICETYPE_L3NM, context_id=json_context_id(context_uuid), + status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, + config_rules=config_rules) + +def json_service_tapi_planned( + service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID + ): + + return json_service( + service_uuid, ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, context_id=json_context_id(context_uuid), status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py index 345b2fdd6950ecda802e8bd1c86e1421b5c60d84..e80681e177f0f0def3dbe75d76e7e65ceaca1e87 100644 --- a/src/compute/service/__main__.py +++ b/src/compute/service/__main__.py @@ -39,6 +39,8 @@ def main(): wait_for_environment_variables([ get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) signal.signal(signal.SIGINT, signal_handler) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py index c77d714a94fa8d2d4ee9cd2c3db06949665a489c..7e050289f19b93dc710185c2b29b326bbfd156d2 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py @@ -16,12 +16,11 @@ import logging from flask import request from flask.json import jsonify from flask_restful import Resource -from common.Constants import DEFAULT_CONTEXT_UUID -from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, SliceStatusEnum +from common.proto.context_pb2 import SliceStatusEnum from context.client.ContextClient import ContextClient -from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient from .tools.Authentication import HTTP_AUTH -from .tools.ContextMethods import get_service, get_slice +from .tools.ContextMethods import get_slice from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR LOGGER = logging.getLogger(__name__) @@ -32,31 +31,22 @@ class L2VPN_Service(Resource): LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id))) LOGGER.debug('Request: {:s}'.format(str(request))) - response = jsonify({}) try: context_client = ContextClient() - target = get_service(context_client, vpn_id) - if target is not None: - if target.service_id.service_uuid.uuid != vpn_id: # pylint: 
disable=no-member - raise Exception('Service retrieval failed. Wrong Service Id was returned') - service_ready_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE - service_status = target.service_status.service_status # pylint: disable=no-member - response.status_code = HTTP_OK if service_status == service_ready_status else HTTP_GATEWAYTIMEOUT - return response - target = get_slice(context_client, vpn_id) - if target is not None: - if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member - raise Exception('Slice retrieval failed. Wrong Slice Id was returned') - slice_ready_status = SliceStatusEnum.SLICESTATUS_ACTIVE - slice_status = target.slice_status.slice_status # pylint: disable=no-member - response.status_code = HTTP_OK if slice_status == slice_ready_status else HTTP_GATEWAYTIMEOUT - return response + if target is None: + raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) - raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) + if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member + raise Exception('Slice retrieval failed. Wrong Slice Id was returned') + + slice_ready_status = SliceStatusEnum.SLICESTATUS_ACTIVE + slice_status = target.slice_status.slice_status # pylint: disable=no-member + response = jsonify({}) + response.status_code = HTTP_OK if slice_status == slice_ready_status else HTTP_GATEWAYTIMEOUT except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(request))) + LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(vpn_id))) response = jsonify({'error': str(e)}) response.status_code = HTTP_SERVERERROR return response @@ -66,18 +56,21 @@ class L2VPN_Service(Resource): LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id))) LOGGER.debug('Request: {:s}'.format(str(request))) - # pylint: disable=no-member - service_id_request = ServiceId() - service_id_request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID - service_id_request.service_uuid.uuid = vpn_id - try: - service_client = ServiceClient() - service_client.DeleteService(service_id_request) + context_client = ContextClient() + + target = get_slice(context_client, vpn_id) + if target is None: + LOGGER.warning('VPN({:s}) not found in database. Nothing done.'.format(str(vpn_id))) + else: + if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member + raise Exception('Slice retrieval failed. 
Wrong Slice Id was returned') + slice_client = SliceClient() + slice_client.DeleteSlice(target.slice_id) response = jsonify({}) response.status_code = HTTP_NOCONTENT except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Something went wrong Deleting Service {:s}'.format(str(request))) + LOGGER.exception('Something went wrong Deleting VPN({:s})'.format(str(vpn_id))) response = jsonify({'error': str(e)}) response.status_code = HTTP_SERVERERROR return response diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py index 7b959b2895d0f0acd27058fcb5e9a571cf6553d2..f27d852f017a08cb8b854cc19568280b9de14470 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py @@ -19,8 +19,7 @@ from flask.json import jsonify from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType from common.Constants import DEFAULT_CONTEXT_UUID -from common.proto.context_pb2 import Service, ServiceStatusEnum, ServiceTypeEnum, SliceStatusEnum, Slice -from service.client.ServiceClient import ServiceClient +from common.proto.context_pb2 import SliceStatusEnum, Slice from slice.client.SliceClient import SliceClient from .schemas.vpn_service import SCHEMA_VPN_SERVICE from .tools.Authentication import HTTP_AUTH @@ -44,30 +43,16 @@ class L2VPN_Services(Resource): vpn_services : List[Dict] = request_data['ietf-l2vpn-svc:vpn-service'] for vpn_service in vpn_services: try: - vpn_service_type = vpn_service['vpn-svc-type'] - if vpn_service_type == 'vpws': - # pylint: disable=no-member - service_request = Service() - service_request.service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID - service_request.service_id.service_uuid.uuid = vpn_service['vpn-id'] - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM - service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED - - service_client = ServiceClient() - service_reply = service_client.CreateService(service_request) - if service_reply != service_request.service_id: # pylint: disable=no-member - raise Exception('Service creation failed. Wrong Service Id was returned') - elif vpn_service_type == 'vpls': - # pylint: disable=no-member - slice_request = Slice() - slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID - slice_request.slice_id.slice_uuid.uuid = vpn_service['vpn-id'] - slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED - - slice_client = SliceClient() - slice_reply = slice_client.CreateSlice(slice_request) - if slice_reply != slice_request.slice_id: # pylint: disable=no-member - raise Exception('Slice creation failed. Wrong Slice Id was returned') + # pylint: disable=no-member + slice_request = Slice() + slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID + slice_request.slice_id.slice_uuid.uuid = vpn_service['vpn-id'] + slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED + + slice_client = SliceClient() + slice_reply = slice_client.CreateSlice(slice_request) + if slice_reply != slice_request.slice_id: # pylint: disable=no-member + raise Exception('Slice creation failed. 
Wrong Slice Id was returned') response = jsonify({}) response.status_code = HTTP_CREATED diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 8be63895b813d7411b76ddeb33902babbf4c9743..3cc823a2aa7a06de6cb591ef6d668ba7eeef5cbd 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -12,169 +12,113 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ctypes import Union -import json, logging -from typing import Dict +import logging +from typing import Dict, Optional from flask import request from flask.json import jsonify from flask.wrappers import Response from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType -from common.proto.context_pb2 import ConfigActionEnum, Service, Slice +from common.proto.context_pb2 import Slice +from common.tools.grpc.ConfigRules import update_config_rule_custom +from common.tools.grpc.Constraints import ( + update_constraint_custom, update_constraint_endpoint_location, update_constraint_endpoint_priority, + update_constraint_sla_availability) +from common.tools.grpc.EndPointIds import update_endpoint_ids from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient -from service.client.ServiceClient import ServiceClient from slice.client.SliceClient import SliceClient from .schemas.site_network_access import SCHEMA_SITE_NETWORK_ACCESS from .tools.Authentication import HTTP_AUTH -from .tools.ContextMethods import get_service, get_slice +from .tools.ContextMethods import get_slice from .tools.HttpStatusCodes import HTTP_NOCONTENT, HTTP_SERVERERROR from .tools.Validator import validate_message -from .Constants import BEARER_MAPPINGS, DEFAULT_ADDRESS_FAMILIES, DEFAULT_BGP_AS, DEFAULT_BGP_ROUTE_TARGET, DEFAULT_MTU +from .Constants import ( + BEARER_MAPPINGS, DEFAULT_ADDRESS_FAMILIES, DEFAULT_BGP_AS, DEFAULT_BGP_ROUTE_TARGET, DEFAULT_MTU) LOGGER = logging.getLogger(__name__) -def process_site_network_access(context_client : ContextClient, site_network_access : Dict) -> Service: +def process_site_network_access(context_client : ContextClient, site_id : str, site_network_access : Dict) -> Slice: vpn_id = site_network_access['vpn-attachment']['vpn-id'] - cvlan_id = site_network_access['connection']['tagged-interface']['dot1q-vlan-tagged']['cvlan-id'] + encapsulation_type = site_network_access['connection']['encapsulation-type'] + cvlan_id = site_network_access['connection']['tagged-interface'][encapsulation_type]['cvlan-id'] + bearer_reference = site_network_access['bearer']['bearer-reference'] + access_priority : Optional[int] = site_network_access.get('availability', {}).get('access-priority') + single_active : bool = len(site_network_access.get('availability', {}).get('single-active', [])) > 0 + all_active : bool = len(site_network_access.get('availability', {}).get('all-active', [])) > 0 + + diversity_constraints = site_network_access.get('access-diversity', {}).get('constraints', {}).get('constraint', []) + raise_if_differs = True + diversity_constraints = { + constraint['constraint-type']:([ + target[0] + for target in constraint['target'].items() + if len(target[1]) == 1 + ][0], raise_if_differs) + for constraint in diversity_constraints + } + mapping = 
+    mapping = BEARER_MAPPINGS.get(bearer_reference)
     if mapping is None:
         msg = 'Specified Bearer({:s}) is not configured.'
         raise Exception(msg.format(str(bearer_reference)))
-    device_uuid,endpoint_uuid,router_id,route_distinguisher,sub_if_index,address_ip,address_prefix = mapping
+    (
+        device_uuid, endpoint_uuid, router_id, route_dist, sub_if_index,
+        address_ip, address_prefix, remote_router, circuit_id
+    ) = mapping
 
-    target : Union[Service, Slice, None] = None
-    if target is None: target = get_service(context_client, vpn_id)
-    if target is None: target = get_slice  (context_client, vpn_id)
+    target = get_slice(context_client, vpn_id)
     if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
-    # pylint: disable=no-member
-    endpoint_ids = target.service_endpoint_ids if isinstance(target, Service) else target.slice_endpoint_ids
-
-    for endpoint_id in endpoint_ids:
-        if endpoint_id.device_id.device_uuid.uuid != device_uuid: continue
-        if endpoint_id.endpoint_uuid.uuid != endpoint_uuid: continue
-        break   # found, do nothing
-    else:
-        # not found, add it
-        endpoint_id = endpoint_ids.add()
-        endpoint_id.device_id.device_uuid.uuid = device_uuid
-        endpoint_id.endpoint_uuid.uuid = endpoint_uuid
-
-    if isinstance(target, Slice): return target
-
-    for config_rule in target.service_config.config_rules:  # pylint: disable=no-member
-        if config_rule.WhichOneof('config_rule') != 'custom': continue
-        if config_rule.custom.resource_key != '/settings': continue
-        json_settings = json.loads(config_rule.custom.resource_value)
-
-        if 'mtu' not in json_settings:                                      # missing, add it
-            json_settings['mtu'] = DEFAULT_MTU
-        elif json_settings['mtu'] != DEFAULT_MTU:                           # differs, raise exception
-            msg = 'Specified MTU({:s}) differs from Service MTU({:s})'
-            raise Exception(msg.format(str(json_settings['mtu']), str(DEFAULT_MTU)))
-
-        if 'address_families' not in json_settings:                         # missing, add it
-            json_settings['address_families'] = DEFAULT_ADDRESS_FAMILIES
-        elif json_settings['address_families'] != DEFAULT_ADDRESS_FAMILIES: # differs, raise exception
-            msg = 'Specified AddressFamilies({:s}) differs from Service AddressFamilies({:s})'
-            raise Exception(msg.format(str(json_settings['address_families']), str(DEFAULT_ADDRESS_FAMILIES)))
-
-        if 'bgp_as' not in json_settings:                                   # missing, add it
-            json_settings['bgp_as'] = DEFAULT_BGP_AS
-        elif json_settings['bgp_as'] != DEFAULT_BGP_AS:                     # differs, raise exception
-            msg = 'Specified BgpAs({:s}) differs from Service BgpAs({:s})'
-            raise Exception(msg.format(str(json_settings['bgp_as']), str(DEFAULT_BGP_AS)))
-
-        if 'bgp_route_target' not in json_settings:                         # missing, add it
-            json_settings['bgp_route_target'] = DEFAULT_BGP_ROUTE_TARGET
-        elif json_settings['bgp_route_target'] != DEFAULT_BGP_ROUTE_TARGET: # differs, raise exception
-            msg = 'Specified BgpRouteTarget({:s}) differs from Service BgpRouteTarget({:s})'
-            raise Exception(msg.format(str(json_settings['bgp_route_target']), str(DEFAULT_BGP_ROUTE_TARGET)))
-
-        config_rule.custom.resource_value = json.dumps(json_settings, sort_keys=True)
-        break
-    else:
-        # not found, add it
-        config_rule = target.service_config.config_rules.add()  # pylint: disable=no-member
-        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
-        config_rule.custom.resource_key = '/settings'
-        config_rule.custom.resource_value = json.dumps({
-            'mtu'             : DEFAULT_MTU,
-            'address_families': DEFAULT_ADDRESS_FAMILIES,
-            'bgp_as'          : DEFAULT_BGP_AS,
-            'bgp_route_target': DEFAULT_BGP_ROUTE_TARGET,
-        }, sort_keys=True)
+    endpoint_ids = target.slice_endpoint_ids        # pylint: disable=no-member
+    config_rules = target.slice_config.config_rules # pylint: disable=no-member
+    constraints  = target.slice_constraints         # pylint: disable=no-member
+
+    endpoint_id = update_endpoint_ids(endpoint_ids, device_uuid, endpoint_uuid)
+
+    service_settings_key = '/settings'
+    update_config_rule_custom(config_rules, service_settings_key, {
+        'mtu'             : (DEFAULT_MTU, True),
+        'address_families': (DEFAULT_ADDRESS_FAMILIES, True),
+        'bgp_as'          : (DEFAULT_BGP_AS, True),
+        'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True),
+    })
 
     endpoint_settings_key = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid)
-    for config_rule in target.service_config.config_rules:  # pylint: disable=no-member
-        if config_rule.WhichOneof('config_rule') != 'custom': continue
-        if config_rule.custom.resource_key != endpoint_settings_key: continue
-        json_settings = json.loads(config_rule.custom.resource_value)
-
-        if 'router_id' not in json_settings:                                # missing, add it
-            json_settings['router_id'] = router_id
-        elif json_settings['router_id'] != router_id:                       # differs, raise exception
-            msg = 'Specified RouterId({:s}) differs from Service RouterId({:s})'
-            raise Exception(msg.format(str(json_settings['router_id']), str(router_id)))
-
-        if 'route_distinguisher' not in json_settings:                      # missing, add it
-            json_settings['route_distinguisher'] = route_distinguisher
-        elif json_settings['route_distinguisher'] != route_distinguisher:   # differs, raise exception
-            msg = 'Specified RouteDistinguisher({:s}) differs from Service RouteDistinguisher({:s})'
-            raise Exception(msg.format(str(json_settings['route_distinguisher']), str(route_distinguisher)))
-
-        if 'sub_interface_index' not in json_settings:                      # missing, add it
-            json_settings['sub_interface_index'] = sub_if_index
-        elif json_settings['sub_interface_index'] != sub_if_index:          # differs, raise exception
-            msg = 'Specified SubInterfaceIndex({:s}) differs from Service SubInterfaceIndex({:s})'
-            raise Exception(msg.format(
-                str(json_settings['sub_interface_index']), str(sub_if_index)))
-
-        if 'vlan_id' not in json_settings:                                  # missing, add it
-            json_settings['vlan_id'] = cvlan_id
-        elif json_settings['vlan_id'] != cvlan_id:                          # differs, raise exception
-            msg = 'Specified VLANId({:s}) differs from Service VLANId({:s})'
-            raise Exception(msg.format(
-                str(json_settings['vlan_id']), str(cvlan_id)))
-
-        if 'address_ip' not in json_settings:                               # missing, add it
-            json_settings['address_ip'] = address_ip
-        elif json_settings['address_ip'] != address_ip:                     # differs, raise exception
-            msg = 'Specified AddressIP({:s}) differs from Service AddressIP({:s})'
-            raise Exception(msg.format(
-                str(json_settings['address_ip']), str(address_ip)))
-
-        if 'address_prefix' not in json_settings:                           # missing, add it
-            json_settings['address_prefix'] = address_prefix
-        elif json_settings['address_prefix'] != address_prefix:             # differs, raise exception
-            msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})'
-            raise Exception(msg.format(
-                str(json_settings['address_prefix']), str(address_prefix)))
-
-        config_rule.custom.resource_value = json.dumps(json_settings, sort_keys=True)
-        break
-    else:
-        # not found, add it
-        config_rule = target.service_config.config_rules.add()  # pylint: disable=no-member
-        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
-        config_rule.custom.resource_key = endpoint_settings_key
-        config_rule.custom.resource_value = json.dumps({
-            'router_id': router_id,
-            'route_distinguisher': route_distinguisher,
-            'sub_interface_index': sub_if_index,
-            'vlan_id': cvlan_id,
-            'address_ip': address_ip,
-            'address_prefix': address_prefix,
-        }, sort_keys=True)
+    field_updates = {}
+    if router_id      is not None: field_updates['router_id'          ] = (router_id,      True)
+    if route_dist     is not None: field_updates['route_distinguisher'] = (route_dist,     True)
+    if sub_if_index   is not None: field_updates['sub_interface_index'] = (sub_if_index,   True)
+    if cvlan_id       is not None: field_updates['vlan_id'            ] = (cvlan_id,       True)
+    if address_ip     is not None: field_updates['address_ip'         ] = (address_ip,     True)
+    if address_prefix is not None: field_updates['address_prefix'     ] = (address_prefix, True)
+    if remote_router  is not None: field_updates['remote_router'      ] = (remote_router,  True)
+    if circuit_id     is not None: field_updates['circuit_id'         ] = (circuit_id,     True)
+    update_config_rule_custom(config_rules, endpoint_settings_key, field_updates)
+
+    if len(diversity_constraints) > 0:
+        update_constraint_custom(constraints, 'diversity', diversity_constraints)
+
+    update_constraint_endpoint_location(constraints, endpoint_id, region=site_id)
+    if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority)
+    if single_active or all_active:
+        # assume 1 disjoint path per endpoint/location included in service/slice
+        location_endpoints = {}
+        for constraint in constraints:
+            if constraint.WhichOneof('constraint') != 'endpoint_location': continue
+            str_endpoint_id = grpc_message_to_json_string(constraint.endpoint_location.endpoint_id)
+            str_location_id = grpc_message_to_json_string(constraint.endpoint_location.location)
+            location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id)
+        num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()}
+        num_disjoint_paths = min(num_endpoints_per_location)
+        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active)
 
     return target
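The single-active/all-active branch derives how many disjoint paths to request from the endpoint-location constraints accumulated so far: one path per endpoint at the most constrained location. A standalone sketch of that computation, with plain tuples standing in for the gRPC constraint messages (the endpoint ids and regions are made up):

```python
# (endpoint_id, location) pairs standing in for 'endpoint_location' constraints.
endpoint_locations = [('R1/EP100', 'site-A'), ('R1/EP200', 'site-A'), ('R2/EP100', 'site-B')]

location_endpoints = {}
for str_endpoint_id, str_location_id in endpoint_locations:
    location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id)

# site-A offers 2 endpoints but site-B only 1, so at most 1 disjoint path fits all locations.
num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()}
num_disjoint_paths = min(num_endpoints_per_location)
assert num_disjoint_paths == 1
```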
 def process_list_site_network_access(
-    context_client : ContextClient, service_client : ServiceClient, slice_client : SliceClient,
-    request_data : Dict
+    context_client : ContextClient, slice_client : SliceClient, site_id : str, request_data : Dict
 ) -> Response:
 
     LOGGER.debug('Request: {:s}'.format(str(request_data)))
@@ -182,21 +126,14 @@ def process_list_site_network_access(
 
     errors = []
     for site_network_access in request_data['ietf-l2vpn-svc:site-network-access']:
-        sna_request = process_site_network_access(context_client, site_network_access)
+        sna_request = process_site_network_access(context_client, site_id, site_network_access)
         LOGGER.debug('sna_request = {:s}'.format(grpc_message_to_json_string(sna_request)))
         try:
-            if isinstance(sna_request, Service):
-                sna_reply = service_client.UpdateService(sna_request)
-                if sna_reply != sna_request.service_id: # pylint: disable=no-member
-                    raise Exception('Service update failed. Wrong Service Id was returned')
-            elif isinstance(sna_request, Slice):
-                sna_reply = slice_client.UpdateSlice(sna_request)
-                if sna_reply != sna_request.slice_id: # pylint: disable=no-member
-                    raise Exception('Slice update failed. Wrong Slice Id was returned')
-            else:
-                raise NotImplementedError('Support for Class({:s}) not implemented'.format(str(type(sna_request))))
+            sna_reply = slice_client.UpdateSlice(sna_request)
+            if sna_reply != sna_request.slice_id: # pylint: disable=no-member
+                raise Exception('Slice update failed. Wrong Slice Id was returned')
         except Exception as e: # pylint: disable=broad-except
-            msg = 'Something went wrong Updating Service {:s}'
+            msg = 'Something went wrong Updating VPN {:s}'
             LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request)))
             errors.append({'error': str(e)})
@@ -210,15 +147,13 @@ class L2VPN_SiteNetworkAccesses(Resource):
         if not request.is_json: raise UnsupportedMediaType('JSON payload is required')
         LOGGER.debug('Site_Id: {:s}'.format(str(site_id)))
         context_client = ContextClient()
-        service_client = ServiceClient()
         slice_client = SliceClient()
-        return process_list_site_network_access(context_client, service_client, slice_client, request.json)
+        return process_list_site_network_access(context_client, slice_client, site_id, request.json)
 
     @HTTP_AUTH.login_required
     def put(self, site_id : str):
         if not request.is_json: raise UnsupportedMediaType('JSON payload is required')
         LOGGER.debug('Site_Id: {:s}'.format(str(site_id)))
         context_client = ContextClient()
-        service_client = ServiceClient()
         slice_client = SliceClient()
-        return process_list_site_network_access(context_client, service_client, slice_client, request.json)
+        return process_list_site_network_access(context_client, slice_client, site_id, request.json)
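With the Service path removed, both POST and PUT now funnel every site-network-access into a Slice update. For orientation, a hedged sketch of the kind of request body these handlers consume; the vpn-id, cvlan-id, bearer-reference, endpoint URL and credentials below are all placeholders, not values defined by this patch:

```python
import requests

site_network_accesses = {'ietf-l2vpn-svc:site-network-access': [{
    'vpn-attachment': {'vpn-id': 'vpn1'},                         # must name an existing slice
    'connection': {
        'encapsulation-type': 'dot1q-vlan-tagged',
        'tagged-interface': {'dot1q-vlan-tagged': {'cvlan-id': 400}},
    },
    'bearer': {'bearer-reference': 'R1-EP100'},                   # must be a BEARER_MAPPINGS key
    'availability': {'access-priority': 100, 'single-active': [None]},
}]}

reply = requests.post(
    'http://<compute-nbi>/sites/site/site1/site-network-accesses/',  # placeholder URL
    json=site_network_accesses, auth=('admin', 'admin'))             # placeholder credentials
print(reply.status_code)
```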
diff --git a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py b/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
index b9639e8046593c1dbf4017cff963ceb7c51d0532..e1273b4e483a06df23d94bdf107005ce7585fb5e 100644
--- a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
+++ b/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py
@@ -33,6 +33,7 @@ the Layer 2 service.
 import requests
 import uuid
 import logging
+import copy
 #from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
 from .sdnconn import SdnConnectorBase, SdnConnectorError
@@ -222,8 +223,29 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
                 http_code=response_service_creation.status_code,
             )
 
-        """Second step, create the connections and vpn attachments"""
+        self.logger.info('connection_points = {:s}'.format(str(connection_points)))
+
+        # Check if protected paths are requested
+        extended_connection_points = []
         for connection_point in connection_points:
+            extended_connection_points.append(connection_point)
+
+            connection_point_wan_info = self.search_mapp(connection_point)
+            service_mapping_info = connection_point_wan_info.get('service_mapping_info', {})
+            redundant_service_endpoint_ids = service_mapping_info.get('redundant')
+
+            if redundant_service_endpoint_ids is None: continue
+            if len(redundant_service_endpoint_ids) == 0: continue
+
+            for redundant_service_endpoint_id in redundant_service_endpoint_ids:
+                redundant_connection_point = copy.deepcopy(connection_point)
+                redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id
+                extended_connection_points.append(redundant_connection_point)
+
+        self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points)))
+
+        """Second step, create the connections and vpn attachments"""
+        for connection_point in extended_connection_points:
             connection_point_wan_info = self.search_mapp(connection_point)
             site_network_access = {}
             connection = {}
@@ -264,6 +286,23 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
             site_network_access["bearer"] = connection_point_wan_info[
                 "service_mapping_info"
             ]["bearer"]
+
+            access_priority = connection_point_wan_info["service_mapping_info"].get("priority")
+            if access_priority is not None:
+                availability = {}
+                availability["access-priority"] = access_priority
+                availability["single-active"] = [None]
+                site_network_access["availability"] = availability
+
+                constraint = {}
+                constraint['constraint-type'] = 'end-to-end-diverse'
+                constraint['target'] = {'all-other-accesses': [None]}
+
+                access_diversity = {}
+                access_diversity['constraints'] = {'constraint': []}
+                access_diversity['constraints']['constraint'].append(constraint)
+                site_network_access["access-diversity"] = access_diversity
+
             site_network_accesses = {}
             site_network_access_list = []
             site_network_access_list.append(site_network_access)
@@ -332,7 +371,7 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
                     self.delete_connectivity_service(vpn_service["vpn-id"])
                     raise SdnConnectorError(
-                        "Request no accepted",
+                        "Request not accepted",
                         http_code=response_endpoint_site_network_access_creation.status_code,
                     )
             except requests.exceptions.ConnectionError:
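The protected-path expansion above duplicates each connection point once per redundant service endpoint declared in its WIM mapping, so the second step emits one site-network-access per primary or backup endpoint. A self-contained sketch of that expansion, with hypothetical endpoint ids:

```python
import copy

connection_point = {'service_endpoint_id': 'ep-main', 'service_endpoint_encapsulation_type': 'dot1q'}
redundant_ids = ['ep-backup-1', 'ep-backup-2']  # from a hypothetical service_mapping_info['redundant']

extended = [connection_point]
for redundant_id in redundant_ids:
    redundant_cp = copy.deepcopy(connection_point)      # deep copy keeps nested fields independent
    redundant_cp['service_endpoint_id'] = redundant_id
    extended.append(redundant_cp)

assert [cp['service_endpoint_id'] for cp in extended] == ['ep-main', 'ep-backup-1', 'ep-backup-2']
```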
diff --git a/src/context/tests/context_report.xml b/src/context/tests/context_report.xml
deleted file mode 100644
index 5ee1c17cd6f59c58d55a5eba38de7ea0366a757c..0000000000000000000000000000000000000000
--- a/src/context/tests/context_report.xml
+++ /dev/null
@@ -1,1539 +0,0 @@
-self = Connection<host=127.0.0.1,port=6379,db=0>
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
->           sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:607:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-self = <redis.retry.Retry object at 0x7fb0186487f0>
-do = <function Connection.connect.<locals>.<lambda> at 0x7fb01a79edc0>
-fail = <function Connection.connect.<locals>.<lambda> at 0x7fb0186538b0>
-
-    def call_with_retry(self, do, fail):
-        """
-        Execute an operation that might fail and returns its result, or
-        raise the exception that was thrown depending on the `Backoff` object.
-        `do`: the operation to call. Expects no argument.
-        `fail`: the failure handler, expects the last error that was thrown
-        """
-        self._backoff.reset()
-        failures = 0
-        while True:
-            try:
->               return do()
-
-/usr/local/lib/python3.9/site-packages/redis/retry.py:45:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
->           lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:608:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-self = Connection<host=127.0.0.1,port=6379,db=0>
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-
-                # connect
-                sock.connect(socket_address)
-
-                # set the socket_timeout now that we're connected
-                sock.settimeout(self.socket_timeout)
-                return sock
-
-            except OSError as _:
-                err = _
-                if sock is not None:
-                    sock.close()
-
-        if err is not None:
->           raise err
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:673:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-self = Connection<host=127.0.0.1,port=6379,db=0>
-
-    def _connect(self):
-        "Create a TCP socket connection"
-        # we want to mimic what socket.create_connection does to support
-        # ipv4/ipv6, but we want to set options prior to calling
-        # socket.connect()
-        err = None
-        for res in socket.getaddrinfo(
-            self.host, self.port, self.socket_type, socket.SOCK_STREAM
-        ):
-            family, socktype, proto, canonname, socket_address = res
-            sock = None
-            try:
-                sock = socket.socket(family, socktype, proto)
-                # TCP_NODELAY
-                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-
-                # TCP_KEEPALIVE
-                if self.socket_keepalive:
-                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-                    for k, v in self.socket_keepalive_options.items():
-                        sock.setsockopt(socket.IPPROTO_TCP, k, v)
-
-                # set the socket_connect_timeout before we connect
-                sock.settimeout(self.socket_connect_timeout)
-
-                # connect
->               sock.connect(socket_address)
-E               ConnectionRefusedError: [Errno 111] Connection refused
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:661: ConnectionRefusedError
-
-During handling of the above exception, another exception occurred:
-
-context_client_grpc = <context.client.ContextClient.ContextClient object at 0x7fb018f15a30>
-context_db_mb = (<common.orm.Database.Database object at 0x7fb018f15910>, <common.message_broker.MessageBroker.MessageBroker object at 0x7fb018f15460>)
-
-    def test_grpc_context(
-        context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-        context_database = context_db_mb[0]
-
-        # ----- Clean the database -----------------------------------------------------------------------------------------
->       context_database.clear_all()
-
-context/tests/test_unitary.py:128:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-common/orm/Database.py:32: in clear_all
-    for key in self._backend.keys():
-common/orm/backend/redis/RedisBackend.py:48: in keys
-    return [k.decode('UTF-8') for k in self._client.keys()]
-/usr/local/lib/python3.9/site-packages/redis/commands/core.py:1370: in keys
-    return self.execute_command("KEYS", pattern, **kwargs)
-/usr/local/lib/python3.9/site-packages/redis/client.py:1173: in execute_command
-    conn = self.connection or pool.get_connection(command_name, **options)
-/usr/local/lib/python3.9/site-packages/redis/connection.py:1370: in get_connection
-    connection.connect()
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-self = Connection<host=127.0.0.1,port=6379,db=0>
-
-    def connect(self):
-        "Connects to the Redis server if not already connected"
-        if self._sock:
-            return
-        try:
-            sock = self.retry.call_with_retry(
-                lambda: self._connect(), lambda error: self.disconnect(error)
-            )
-        except socket.timeout:
-            raise TimeoutError("Timeout connecting to server")
-        except OSError as e:
->           raise ConnectionError(self._error_message(e))
-E           redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:6379. Connection refused.
-
-/usr/local/lib/python3.9/site-packages/redis/connection.py:613: ConnectionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)>
-
-    def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
->       reply = do_rest_request('/context_ids')
-
-context/tests/test_unitary.py:1183:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-url = '/context_ids'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
->       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)>
-
-    def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
->       reply = do_rest_request('/contexts')
-
-context/tests/test_unitary.py:1187:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-url = '/contexts'
-
-    def do_rest_request(url : str):
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-        LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-        reply = requests.get(request_url)
-        LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
->       assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-E       AssertionError: Reply failed with code 500
-E       assert 500 == 200
-E         +500
-E         -200
-
-context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)>
-
-    def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-        context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
->       reply = do_rest_request('/context/{:s}'.format(context_uuid))
-
-context/tests/test_unitary.py:1192: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) -> reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid)) - -context/tests/test_unitary.py:1197: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/topology_ids' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) -> reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid)) - -context/tests/test_unitary.py:1202: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/topologies' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID) -> reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid)) - -context/tests/test_unitary.py:1208: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/topology/admin' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 
'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) -> reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid)) - -context/tests/test_unitary.py:1213: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/service_ids' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) -> reply = do_rest_request('/context/{:s}/services'.format(context_uuid)) - -context/tests/test_unitary.py:1218: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/services' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='') -> reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) - -context/tests/test_unitary.py:1224: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/service/SVC%3AR1%2FEP100-R2%2FEP100' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 
200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -> reply = do_rest_request('/device_ids') - -context/tests/test_unitary.py:1228: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/device_ids' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -> reply = do_rest_request('/devices') - -context/tests/test_unitary.py:1232: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/devices' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='') -> reply = do_rest_request('/device/{:s}'.format(device_uuid)) - -context/tests/test_unitary.py:1237: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/device/R1' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -> reply = do_rest_request('/link_ids') - -context/tests/test_unitary.py:1241: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/link_ids' - - def do_rest_request(url : str): - base_url = 
get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -> reply = do_rest_request('/links') - -context/tests/test_unitary.py:1245: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/links' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='') -> reply = do_rest_request('/link/{:s}'.format(link_uuid)) - -context/tests/test_unitary.py:1250: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/link/R1%2FEP2-R2%2FEP1' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') -> reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid)) - -context/tests/test_unitary.py:1256: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/service/SVC%3AR1%2FEP100-R3%2FEP100/connection_ids' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code 
{}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') -> reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid)) - -context/tests/test_unitary.py:1262: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/context/admin/service/SVC%3AR1%2FEP100-R3%2FEP100/connections' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionErrorcontext_service_rest = <RestServer(Thread-71, started daemon 140392926267136)> - - def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='') -> reply = do_rest_request('/connection/{:s}'.format(connection_uuid)) - -context/tests/test_unitary.py:1267: -_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - -url = '/connection/CON%3AR1%2FEP100-R3%2FEP100' - - def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -> assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -E AssertionError: Reply failed with code 500 -E assert 500 == 200 -E +500 -E -200 - -context/tests/test_unitary.py:116: AssertionError \ No newline at end of file diff --git a/src/device/requirements.in b/src/device/requirements.in index 10506fbd42c5b7a64afb3cc7c6ea32e0f1fa49f6..9c8c0ef18f3bcd4a92180465d11cd465c4336d44 100644 --- a/src/device/requirements.in +++ b/src/device/requirements.in @@ -10,6 +10,9 @@ pytz==2021.3 redis==4.1.2 requests==2.27.1 xmltodict==0.12.0 +tabulate +ipaddress +macaddress # pip's dependency resolver does not take into account installed packages. 
# p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one
diff --git a/src/device/service/DeviceService.py b/src/device/service/DeviceService.py index 4dc2b01000d8ca6dd2b3ecee0b0f867338636c73..59134f26d3dd8c3fa0a9dddbcd1d3df298ec076a 100644 --- a/src/device/service/DeviceService.py +++ b/src/device/service/DeviceService.py @@ -23,10 +23,15 @@ from .driver_api.DriverInstanceCache import DriverInstanceCache from .DeviceServiceServicerImpl import DeviceServiceServicerImpl from .MonitoringLoops import MonitoringLoops +# Custom gRPC settings +# Multiple clients might keep connections alive waiting for RPC methods to be executed. +# Requests need to be serialized to ensure correct device configurations +GRPC_MAX_WORKERS = 200 + class DeviceService(GenericGrpcService): def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.DEVICE) - super().__init__(port, cls_name=cls_name) + super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name) database = Database(get_database_backend(backend=BackendEnum.INMEMORY)) self.monitoring_loops = MonitoringLoops(database) self.device_servicer = DeviceServiceServicerImpl(database, driver_instance_cache, self.monitoring_loops)
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 6189816bcd35dd973e4a7da389f256bdb685a79f..d5d44f34ffb69a337b715a0884aea3770b3d3cec 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -24,6 +24,7 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, OperationFailedException from common.tools.grpc.Tools import grpc_message_to_json +from common.tools.mutex_queues.MutexQueues import MutexQueues from context.client.ContextClient import ContextClient from .database.ConfigModel import ( ConfigModel, ConfigRuleModel, ORM_ConfigActionEnum, get_config_rules, grpc_config_rules_to_raw, update_config) @@ -56,6 +57,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): self.database = database self.driver_instance_cache = driver_instance_cache self.monitoring_loops = monitoring_loops + self.mutex_queues = MutexQueues() LOGGER.debug('Servicer Created') @safe_and_metered_rpc_method(METRICS, LOGGER) @@ -101,348 +103,368 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): json_request['device_config'] = {} request = Device(**json_request) - sync_device_from_context(device_uuid, self.context_client, self.database) - db_device,_ = update_device_in_local_database(self.database, request) - - driver_filter_fields = get_device_driver_filter_fields(db_device) - - #LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules))) - address = connection_config_rules.pop('address', None) - port = connection_config_rules.pop('port', None) - settings = connection_config_rules.pop('settings', '{}') + self.mutex_queues.wait_my_turn(device_uuid) try: - settings = json.loads(settings) - except ValueError as e: - raise InvalidArgumentException( - 'device.device_config.config_rules[settings]', settings, - extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e - driver : _Driver = self.driver_instance_cache.get( - device_uuid,
filter_fields=driver_filter_fields, address=address, port=port, settings=settings) - driver.Connect() - - endpoints = driver.GetConfig([RESOURCE_ENDPOINTS]) - try: - for resource_key, resource_value in endpoints: + sync_device_from_context(device_uuid, self.context_client, self.database) + db_device,_ = update_device_in_local_database(self.database, request) + + driver_filter_fields = get_device_driver_filter_fields(db_device) + + #LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules))) + address = connection_config_rules.pop('address', None) + port = connection_config_rules.pop('port', None) + settings = connection_config_rules.pop('settings', '{}') + try: + settings = json.loads(settings) + except ValueError as e: + raise InvalidArgumentException( + 'device.device_config.config_rules[settings]', settings, + extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e + driver : _Driver = self.driver_instance_cache.get( + device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings) + driver.Connect() + + endpoints = driver.GetConfig([RESOURCE_ENDPOINTS]) + try: + for resource_key, resource_value in endpoints: + if isinstance(resource_value, Exception): + LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value))) + continue + endpoint_uuid = resource_value.get('uuid') + endpoint_type = resource_value.get('type') + str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) + db_endpoint, _ = update_or_create_object( + self.database, EndPointModel, str_endpoint_key, { + 'device_fk' : db_device, + 'endpoint_uuid': endpoint_uuid, + 'endpoint_type': endpoint_type, + 'resource_key' : resource_key, + }) + sample_types : Dict[int, str] = resource_value.get('sample_types', {}) + for sample_type, monitor_resource_key in sample_types.items(): + str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) + update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, { + 'endpoint_fk' : db_endpoint, + 'resource_key' : monitor_resource_key, + 'kpi_sample_type': grpc_to_enum__kpi_sample_type(sample_type), + }) + except: # pylint: disable=bare-except + LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints))) + + raw_running_config_rules = driver.GetConfig() + running_config_rules = [] + for resource_key, resource_value in raw_running_config_rules: if isinstance(resource_value, Exception): - LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value))) + msg = 'Error retrieving config rules: {:s} => {:s}' + LOGGER.error(msg.format(str(resource_key), str(resource_value))) continue - endpoint_uuid = resource_value.get('uuid') - endpoint_type = resource_value.get('type') - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) - db_endpoint, _ = update_or_create_object( - self.database, EndPointModel, str_endpoint_key, { - 'device_fk' : db_device, - 'endpoint_uuid': endpoint_uuid, - 'endpoint_type': endpoint_type, - 'resource_key' : resource_key, - }) - sample_types : Dict[int, str] = resource_value.get('sample_types', {}) - for sample_type, monitor_resource_key in sample_types.items(): - str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) - update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, { - 'endpoint_fk' : db_endpoint, - 'resource_key' : monitor_resource_key, - 'kpi_sample_type': 
grpc_to_enum__kpi_sample_type(sample_type), - }) - except: # pylint: disable=bare-except - LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints))) - - raw_running_config_rules = driver.GetConfig() - running_config_rules = [] - for resource_key, resource_value in raw_running_config_rules: - if isinstance(resource_value, Exception): - msg = 'Error retrieving config rules: {:s} => {:s}' - LOGGER.error(msg.format(str(resource_key), str(resource_value))) - continue - config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True)) - running_config_rules.append(config_rule) + config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True)) + running_config_rules.append(config_rule) - #for running_config_rule in running_config_rules: - # LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule))) - update_config(self.database, device_uuid, 'running', running_config_rules) + #for running_config_rule in running_config_rules: + # LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule))) + update_config(self.database, device_uuid, 'running', running_config_rules) - initial_config_rules = driver.GetInitialConfig() - update_config(self.database, device_uuid, 'initial', initial_config_rules) + initial_config_rules = driver.GetInitialConfig() + update_config(self.database, device_uuid, 'initial', initial_config_rules) - #LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump( - # include_config_rules=True, include_drivers=True, include_endpoints=True)))) + #LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump( + # include_config_rules=True, include_drivers=True, include_endpoints=True)))) - sync_device_to_context(db_device, self.context_client) - return DeviceId(**db_device.dump_id()) + sync_device_to_context(db_device, self.context_client) + return DeviceId(**db_device.dump_id()) + finally: + self.mutex_queues.signal_done(device_uuid) @safe_and_metered_rpc_method(METRICS, LOGGER) def ConfigureDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: device_id = request.device_id device_uuid = device_id.device_uuid.uuid - sync_device_from_context(device_uuid, self.context_client, self.database) + self.mutex_queues.wait_my_turn(device_uuid) + try: + sync_device_from_context(device_uuid, self.context_client, self.database) - context_config_rules = get_config_rules(self.database, device_uuid, 'running') - context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules} - #LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules))) + context_config_rules = get_config_rules(self.database, device_uuid, 'running') + context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules} + #LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules))) - db_device,_ = update_device_in_local_database(self.database, request) + db_device,_ = update_device_in_local_database(self.database, request) - request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) - #LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules))) + request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) + #LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules))) - resources_to_set : List[Tuple[str, Any]] = [] # key, 
value - resources_to_delete : List[Tuple[str, Any]] = [] # key, value + resources_to_set : List[Tuple[str, Any]] = [] # key, value + resources_to_delete : List[Tuple[str, Any]] = [] # key, value - for config_rule in request_config_rules: - action, key, value = config_rule - if action == ORM_ConfigActionEnum.SET: - if (key not in context_config_rules) or (context_config_rules[key] != value): - resources_to_set.append((key, value)) - elif action == ORM_ConfigActionEnum.DELETE: - if key in context_config_rules: - resources_to_delete.append((key, value)) + for config_rule in request_config_rules: + action, key, value = config_rule + if action == ORM_ConfigActionEnum.SET: + if (key not in context_config_rules) or (context_config_rules[key] != value): + resources_to_set.append((key, value)) + elif action == ORM_ConfigActionEnum.DELETE: + if key in context_config_rules: + resources_to_delete.append((key, value)) - #LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set))) - #LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete))) + #LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set))) + #LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete))) - # TODO: use of datastores (might be virtual ones) to enable rollbacks + # TODO: use of datastores (might be virtual ones) to enable rollbacks - errors = [] + errors = [] - driver : _Driver = self.driver_instance_cache.get(device_uuid) - if driver is None: - errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid))) + driver : _Driver = self.driver_instance_cache.get(device_uuid) + if driver is None: + errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid))) + + if len(errors) == 0: + results_setconfig = driver.SetConfig(resources_to_set) + errors.extend(check_set_errors(resources_to_set, results_setconfig)) - if len(errors) == 0: - results_setconfig = driver.SetConfig(resources_to_set) - errors.extend(check_set_errors(resources_to_set, results_setconfig)) + if len(errors) == 0: + results_deleteconfig = driver.DeleteConfig(resources_to_delete) + errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig)) - if len(errors) == 0: - results_deleteconfig = driver.DeleteConfig(resources_to_delete) - errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig)) + if len(errors) > 0: + raise OperationFailedException('ConfigureDevice', extra_details=errors) - if len(errors) > 0: - raise OperationFailedException('ConfigureDevice', extra_details=errors) + running_config_rules = driver.GetConfig() + running_config_rules = [ + (ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True)) + for config_rule in running_config_rules if not isinstance(config_rule[1], Exception) + ] + #for running_config_rule in running_config_rules: + # LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule))) + update_config(self.database, device_uuid, 'running', running_config_rules) - running_config_rules = driver.GetConfig() - running_config_rules = [ - (ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True)) - for config_rule in running_config_rules - ] - #for running_config_rule in running_config_rules: - # LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule))) - update_config(self.database, device_uuid, 'running', 
running_config_rules) + sync_device_to_context(db_device, self.context_client) + return DeviceId(**db_device.dump_id()) + finally: + self.mutex_queues.signal_done(device_uuid) - sync_device_to_context(db_device, self.context_client) - return DeviceId(**db_device.dump_id()) @safe_and_metered_rpc_method(METRICS, LOGGER) def DeleteDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: device_uuid = request.device_uuid.uuid - self.monitoring_loops.remove(device_uuid) + self.mutex_queues.wait_my_turn(device_uuid) + try: + self.monitoring_loops.remove(device_uuid) - sync_device_from_context(device_uuid, self.context_client, self.database) - db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) - if db_device is None: return Empty() + sync_device_from_context(device_uuid, self.context_client, self.database) + db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) + if db_device is None: return Empty() - self.driver_instance_cache.delete(device_uuid) - delete_device_from_context(db_device, self.context_client) + self.driver_instance_cache.delete(device_uuid) + delete_device_from_context(db_device, self.context_client) - for db_kpi_pk,_ in db_device.references(KpiModel): - db_kpi = get_object(self.database, KpiModel, db_kpi_pk) - for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel): - get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete() - db_kpi.delete() + for db_kpi_pk,_ in db_device.references(KpiModel): + db_kpi = get_object(self.database, KpiModel, db_kpi_pk) + for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel): + get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete() + db_kpi.delete() - for db_endpoint_pk,_ in db_device.references(EndPointModel): - db_endpoint = EndPointModel(self.database, db_endpoint_pk) - for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel): - get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete() - db_endpoint.delete() + for db_endpoint_pk,_ in db_device.references(EndPointModel): + db_endpoint = EndPointModel(self.database, db_endpoint_pk) + for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel): + get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete() + db_endpoint.delete() - for db_driver_pk,_ in db_device.references(DriverModel): - get_object(self.database, DriverModel, db_driver_pk).delete() + for db_driver_pk,_ in db_device.references(DriverModel): + get_object(self.database, DriverModel, db_driver_pk).delete() - db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk) - for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel): - get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete() + db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk) + for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel): + get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete() - db_running_config = ConfigModel(self.database, db_device.device_running_config_fk) - for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel): - get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete() + db_running_config = ConfigModel(self.database, db_device.device_running_config_fk) + for db_config_rule_pk,_ in 
db_running_config.references(ConfigRuleModel): + get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete() - db_device.delete() - db_initial_config.delete() - db_running_config.delete() - return Empty() + db_device.delete() + db_initial_config.delete() + db_running_config.delete() + return Empty() + finally: + self.mutex_queues.signal_done(device_uuid) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetInitialConfig(self, request : DeviceId, context : grpc.ServicerContext) -> DeviceConfig: device_uuid = request.device_uuid.uuid - sync_device_from_context(device_uuid, self.context_client, self.database) - db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) + self.mutex_queues.wait_my_turn(device_uuid) + try: + sync_device_from_context(device_uuid, self.context_client, self.database) + db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) - config_rules = {} if db_device is None else db_device.dump_initial_config() - return DeviceConfig(config_rules=config_rules) + config_rules = {} if db_device is None else db_device.dump_initial_config() + device_config = DeviceConfig(config_rules=config_rules) + return device_config + finally: + self.mutex_queues.signal_done(device_uuid) @safe_and_metered_rpc_method(METRICS, LOGGER) def MonitorDeviceKpi(self, request : MonitoringSettings, context : grpc.ServicerContext) -> Empty: kpi_uuid = request.kpi_id.kpi_id.uuid + device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid + self.mutex_queues.wait_my_turn(device_uuid) + try: + subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0) + if subscribe: + db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) + if db_device is None: + msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + endpoint_id = request.kpi_descriptor.endpoint_id + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) + endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: + str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) + str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') + db_endpoint : EndPointModel = get_object( + self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False) + if db_endpoint is None: + msg = 'Device({:s})/EndPoint({:s}) not found. 
EndPointKey({:s})'.format( + str(device_uuid), str(endpoint_uuid), str(str_endpoint_key)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + driver : _Driver = self.driver_instance_cache.get(device_uuid) + if driver is None: + msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + sample_type = request.kpi_descriptor.kpi_sample_type + + attributes = { + 'kpi_uuid' : request.kpi_id.kpi_id.uuid, + 'kpi_description' : request.kpi_descriptor.kpi_description, + 'kpi_sample_type' : grpc_to_enum__kpi_sample_type(sample_type), + 'device_fk' : db_device, + 'endpoint_fk' : db_endpoint, + 'sampling_duration': request.sampling_duration_s, + 'sampling_interval': request.sampling_interval_s, + } + result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes) + db_kpi, updated = result + + str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) + db_endpoint_monitor : EndPointMonitorModel = get_object( + self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False) + if db_endpoint_monitor is None: + msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format( + str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')), + str(device_uuid), str(endpoint_uuid)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key) + str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':') + attributes = { + 'endpoint_monitor_fk': db_endpoint_monitor, + 'kpi_fk' : db_kpi, + } + result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object( + self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes) + db_endpoint_monitor_kpi, updated = result + + resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval + resources_to_subscribe.append( + (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval)) + results_subscribestate = driver.SubscribeState(resources_to_subscribe) + errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate) + if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors) + + self.monitoring_loops.add(device_uuid, driver) - subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0) - if subscribe: - device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid - - db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) - if db_device is None: - msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - endpoint_id = request.kpi_descriptor.endpoint_id - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - str_endpoint_key = 
key_to_str([str_endpoint_key, str_topology_key], separator=':') - db_endpoint : EndPointModel = get_object( - self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False) - if db_endpoint is None: - msg = 'Device({:s})/EndPoint({:s}) not found. EndPointKey({:s})'.format( - str(device_uuid), str(endpoint_uuid), str(str_endpoint_key)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - driver : _Driver = self.driver_instance_cache.get(device_uuid) - if driver is None: - msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - sample_type = request.kpi_descriptor.kpi_sample_type - - attributes = { - 'kpi_uuid' : request.kpi_id.kpi_id.uuid, - 'kpi_description' : request.kpi_descriptor.kpi_description, - 'kpi_sample_type' : grpc_to_enum__kpi_sample_type(sample_type), - 'device_fk' : db_device, - 'endpoint_fk' : db_endpoint, - 'sampling_duration': request.sampling_duration_s, - 'sampling_interval': request.sampling_interval_s, - } - result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes) - db_kpi, updated = result - - str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) - db_endpoint_monitor : EndPointMonitorModel = get_object( - self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False) - if db_endpoint_monitor is None: - msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format( - str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')), - str(device_uuid), str(endpoint_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key) - str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':') - attributes = { - 'endpoint_monitor_fk': db_endpoint_monitor, - 'kpi_fk' : db_kpi, - } - result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object( - self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes) - db_endpoint_monitor_kpi, updated = result - - resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval - resources_to_subscribe.append( - (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval)) - results_subscribestate = driver.SubscribeState(resources_to_subscribe) - errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate) - if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors) - - self.monitoring_loops.add(device_uuid, driver) - - else: - db_kpi : KpiModel = get_object( - self.database, KpiModel, kpi_uuid, raise_if_not_found=False) - if db_kpi is None: - msg = 'Kpi({:s}) not found'.format(str(kpi_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - db_device : DeviceModel = get_object( - self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False) - if db_device is None: - msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - device_uuid = db_device.device_uuid - - db_endpoint : EndPointModel = get_object( - self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False) - if db_endpoint is None: - msg = 'EndPoint({:s}) not 
found'.format(str(db_kpi.endpoint_fk)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - endpoint_uuid = db_endpoint.endpoint_uuid - str_endpoint_key = db_endpoint.pk - - kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type - sample_type = kpi_sample_type.value - str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) - db_endpoint_monitor : EndPointMonitorModel = get_object( - self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False) - if db_endpoint_monitor is None: - msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key) - str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':') - db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object( - self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False) - if db_endpoint_monitor_kpi is None: - msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval - resources_to_unsubscribe.append( - (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval)) - - driver : _Driver = self.driver_instance_cache.get(device_uuid) - if driver is None: - msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe) - errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate) - if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors) - - db_endpoint_monitor_kpi.delete() - db_kpi.delete() - - # There is one monitoring loop per device; keep them active since they are re-used by different monitoring - # requests. - #self.monitoring_loops.remove(device_uuid) - - # Subscriptions are not stored as classical driver config. - # TODO: consider adding it somehow in the configuration. 
- # Warning: GetConfig might be very slow in OpenConfig devices - #running_config_rules = [ - # (config_rule[0], json.dumps(config_rule[1], sort_keys=True)) - # for config_rule in driver.GetConfig() - #] - #context_config_rules = { - # config_rule[1]: config_rule[2] - # for config_rule in get_config_rules(self.database, device_uuid, 'running') - #} - - ## each in context, not in running => delete in context - ## each in running, not in context => add to context - ## each in context and in running, context.value != running.value => update in context - #running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = [] - #for config_rule_key,config_rule_value in running_config_rules: - # running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value)) - # context_config_rules.pop(config_rule_key, None) - #for context_rule_key,context_rule_value in context_config_rules.items(): - # running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value)) - - ##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}' - ##for i,running_config_rules_action in enumerate(running_config_rules_actions): - ## LOGGER.info(msg.format(i, str(running_config_rules_action))) - #update_config(self.database, device_uuid, 'running', running_config_rules_actions) - - sync_device_to_context(db_device, self.context_client) - return Empty() + else: + db_kpi : KpiModel = get_object( + self.database, KpiModel, kpi_uuid, raise_if_not_found=False) + if db_kpi is None: + msg = 'Kpi({:s}) not found'.format(str(kpi_uuid)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + db_device : DeviceModel = get_object( + self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False) + if db_device is None: + msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + device_uuid = db_device.device_uuid + + db_endpoint : EndPointModel = get_object( + self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False) + if db_endpoint is None: + msg = 'EndPoint({:s}) not found'.format(str(db_kpi.endpoint_fk)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + endpoint_uuid = db_endpoint.endpoint_uuid + str_endpoint_key = db_endpoint.pk + + kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type + sample_type = kpi_sample_type.value + str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) + db_endpoint_monitor : EndPointMonitorModel = get_object( + self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False) + if db_endpoint_monitor is None: + msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key) + str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':') + db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object( + self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False) + if db_endpoint_monitor_kpi is None: + msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, 
sampling_interval + resources_to_unsubscribe.append( + (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval)) + + driver : _Driver = self.driver_instance_cache.get(device_uuid) + if driver is None: + msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe) + errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate) + if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors) + + db_endpoint_monitor_kpi.delete() + db_kpi.delete() + + # There is one monitoring loop per device; keep them active since they are re-used by different monitoring + # requests. + #self.monitoring_loops.remove(device_uuid) + + # Subscriptions are not stored as classical driver config. + # TODO: consider adding it somehow in the configuration. + # Warning: GetConfig might be very slow in OpenConfig devices + #running_config_rules = [ + # (config_rule[0], json.dumps(config_rule[1], sort_keys=True)) + # for config_rule in driver.GetConfig() + #] + #context_config_rules = { + # config_rule[1]: config_rule[2] + # for config_rule in get_config_rules(self.database, device_uuid, 'running') + #} + + ## each in context, not in running => delete in context + ## each in running, not in context => add to context + ## each in context and in running, context.value != running.value => update in context + #running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = [] + #for config_rule_key,config_rule_value in running_config_rules: + # running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value)) + # context_config_rules.pop(config_rule_key, None) + #for context_rule_key,context_rule_value in context_config_rules.items(): + # running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value)) + + ##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}' + ##for i,running_config_rules_action in enumerate(running_config_rules_actions): + ## LOGGER.info(msg.format(i, str(running_config_rules_action))) + #update_config(self.database, device_uuid, 'running', running_config_rules_actions) + + sync_device_to_context(db_device, self.context_client) + return Empty() + finally: + self.mutex_queues.signal_done(device_uuid) diff --git a/src/device/service/__main__.py b/src/device/service/__main__.py index 1f0adfa8f1dd8b3e307ed202967b1d5195171f11..5c9b41531e7bc579cbe5cc563f20b193f6bc5a90 100644 --- a/src/device/service/__main__.py +++ b/src/device/service/__main__.py @@ -34,7 +34,7 @@ def main(): global LOGGER # pylint: disable=global-statement log_level = get_log_level() - logging.basicConfig(level=log_level) + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING) logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) logging.getLogger('monitoring-client').setLevel(logging.WARNING) diff --git a/src/device/service/database/EndPointModel.py b/src/device/service/database/EndPointModel.py index 84d0c97073481af162b1e66f7e35c93bc6e1eed5..3d4435737349809c527c80546ed412e621afcbdd 100644 --- a/src/device/service/database/EndPointModel.py +++ b/src/device/service/database/EndPointModel.py @@ -34,7 +34,6 @@ class EndPointModel(Model): device_fk = 
ForeignKeyField(DeviceModel) endpoint_uuid = StringField(required=True, allow_empty=False) endpoint_type = StringField() - resource_key = StringField(required=True, allow_empty=False) def dump_id(self) -> Dict: device_id = DeviceModel(self.database, self.device_fk).dump_id() @@ -74,13 +73,7 @@ def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc for kpi_sample_type in grpc_endpoint_kpi_sample_types: orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)]) - #db_endpoint_kpi_sample_type = EndPointMonitorModel(database, str_endpoint_kpi_sample_type_key) - #db_endpoint_kpi_sample_type.endpoint_fk = db_endpoint - #db_endpoint_kpi_sample_type.resource_key = '' # during initialization, allow empty value - #db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type - #db_endpoint_kpi_sample_type.save() update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, { 'endpoint_fk' : db_endpoint, - #'resource_key' : '', # during initialization, allow empty value 'kpi_sample_type': orm_kpi_sample_type, }) diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py index 7dbb9eddb238dcaae9d00b579a1851aacf53225d..371f4cccb4e002e4d232823e47e31f577d1a4285 100644 --- a/src/device/service/driver_api/_Driver.py +++ b/src/device/service/driver_api/_Driver.py @@ -15,16 +15,18 @@ import threading from typing import Any, Iterator, List, Optional, Tuple, Union -# Special resource names to request to the driver to retrieve the specified configuration/structural resources. +# Special resource names to request to the driver to retrieve the specified +# configuration/structural resources. # These resource names should be used with GetConfig() method. -RESOURCE_ENDPOINTS = '__endpoints__' -RESOURCE_INTERFACES = '__interfaces__' +RESOURCE_ENDPOINTS = '__endpoints__' +RESOURCE_INTERFACES = '__interfaces__' RESOURCE_NETWORK_INSTANCES = '__network_instances__' -RESOURCE_ROUTING_POLICIES = '__routing_policies__' -RESOURCE_ACL = '__acl__' +RESOURCE_ROUTING_POLICIES = '__routing_policies__' +RESOURCE_ACL = '__acl__' + class _Driver: - def __init__(self, address : str, port : int, **settings) -> None: + def __init__(self, address: str, port: int, **settings) -> None: """ Initialize Driver. Parameters: address : str @@ -56,92 +58,122 @@ class _Driver: """ Retrieve initial configuration of entire device. Returns: values : List[Tuple[str, Any]] - List of tuples (resource key, resource value) for resource keys. + List of tuples (resource key, resource value) for + resource keys. """ raise NotImplementedError() - def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: - """ Retrieve running configuration of entire device, or selected resource keys. + def GetConfig(self, resource_keys: List[str] = []) -> \ + List[Tuple[str, Union[Any, None, Exception]]]: + """ Retrieve running configuration of entire device or + selected resource keys. Parameters: resource_keys : List[str] List of keys pointing to the resources to be retrieved. Returns: values : List[Tuple[str, Union[Any, None, Exception]]] - List of tuples (resource key, resource value) for resource keys requested. If a resource is found, - the appropriate value type must be retrieved. If a resource is not found, None must be retrieved as - value for that resource. In case of Exception, the Exception must be retrieved as value. 
+                List of tuples (resource key, resource value) for
+                resource keys requested. If a resource is found,
+                the appropriate value type must be retrieved.
+                If a resource is not found, None must be retrieved as
+                value for that resource. In case of Exception,
+                the Exception must be retrieved as value.
         """
         raise NotImplementedError()
 
-    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+    def SetConfig(self, resources: List[Tuple[str, Any]]) -> \
+            List[Union[bool, Exception]]:
         """ Create/Update configuration for a list of resources.
             Parameters:
                 resources : List[Tuple[str, Any]]
-                    List of tuples, each containing a resource_key pointing the resource to be modified, and a
-                    resource_value containing the new value to be set.
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value
+                    containing the new value to be set.
             Returns:
                 results : List[Union[bool, Exception]]
-                    List of results for resource key changes requested. Return values must be in the same order than
-                    resource keys requested. If a resource is properly set, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                    List of results for resource key changes requested.
+                    Return values must be in the same order as the
+                    resource keys requested. If a resource is properly set,
+                    True must be retrieved; otherwise, the Exception that is
+                    raised during the processing must be retrieved.
         """
         raise NotImplementedError()
 
-    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> \
+            List[Union[bool, Exception]]:
         """ Delete configuration for a list of resources.
             Parameters:
                 resources : List[Tuple[str, Any]]
-                    List of tuples, each containing a resource_key pointing the resource to be modified, and a
-                    resource_value containing possible additionally required values to locate the value to be removed.
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value
+                    containing possible additionally required values to locate
+                    the value to be removed.
             Returns:
-                results : List[bool]
-                    List of results for resource key deletions requested. Return values must be in the same order than
-                    resource keys requested. If a resource is properly deleted, True must be retrieved; otherwise, the
-                    Exception that is raised during the processing must be retrieved.
+                results : List[Union[bool, Exception]]
+                    List of results for resource key deletions requested.
+                    Return values must be in the same order as the resource
+                    keys requested. If a resource is properly deleted, True
+                    must be retrieved; otherwise, the Exception that is raised
+                    during the processing must be retrieved.
         """
         raise NotImplementedError()
 
-    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
-        """ Subscribe to state information of entire device, or selected resources. Subscriptions are incremental.
+    def SubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> \
+            List[Union[bool, Exception]]:
+        """ Subscribe to state information of entire device or
+            selected resources. Subscriptions are incremental.
            Driver should keep track of requested resources.
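+            Examples (an illustrative sketch; the exact resource keys are
+            driver-specific and hypothetical here):
+                # sample two counters every 5 seconds for 120 seconds
+                subscriptions = [
+                    ('/endpoints/endpoint[eth0]/state/in-octets',  120.0, 5.0),
+                    ('/endpoints/endpoint[eth0]/state/out-octets', 120.0, 5.0),
+                ]
+                results = driver.SubscribeState(subscriptions)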
            Parameters:
                subscriptions : List[Tuple[str, float, float]]
-                    List of tuples, each containing a resource_key pointing the resource to be subscribed, a
-                    sampling_duration, and a sampling_interval (both in seconds with float representation) defining,
-                    respectively, for how long monitoring should last, and the desired monitoring interval for the
-                    resource specified.
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be subscribed, a sampling_duration, and a
+                    sampling_interval (both in seconds with float
+                    representation) defining, respectively, for how long
+                    monitoring should last, and the desired monitoring interval
+                    for the resource specified.
            Returns:
-                results : List[bool]
-                    List of results for resource key subscriptions requested. Return values must be in the same order
-                    than resource keys requested. If a resource is properly subscribed, True must be retrieved;
-                    otherwise, the Exception that is raised during the processing must be retrieved.
+                results : List[Union[bool, Exception]]
+                    List of results for resource key subscriptions requested.
+                    Return values must be in the same order as the resource
+                    keys requested. If a resource is properly subscribed,
+                    True must be retrieved; otherwise, the Exception that is
+                    raised during the processing must be retrieved.
        """
        raise NotImplementedError()

-    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
-        """ Unsubscribe from state information of entire device, or selected resources. Subscriptions are incremental.
+    def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]]) \
+        -> List[Union[bool, Exception]]:
+        """ Unsubscribe from state information of entire device
+            or selected resources. Subscriptions are incremental.
            Driver should keep track of requested resources.
            Parameters:
                subscriptions : List[str]
-                    List of tuples, each containing a resource_key pointing the resource to be subscribed, a
-                    sampling_duration, and a sampling_interval (both in seconds with float representation) defining,
-                    respectively, for how long monitoring should last, and the desired monitoring interval for the
-                    resource specified.
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be unsubscribed, a sampling_duration, and a
+                    sampling_interval (both in seconds with float
+                    representation) defining, respectively, for how long
+                    monitoring should last, and the desired monitoring interval
+                    for the resource specified.
            Returns:
                results : List[Union[bool, Exception]]
-                    List of results for resource key unsubscriptions requested. Return values must be in the same order
-                    than resource keys requested. If a resource is properly unsubscribed, True must be retrieved;
-                    otherwise, the Exception that is raised during the processing must be retrieved.
+                    List of results for resource key un-subscriptions requested.
+                    Return values must be in the same order as the resource
+                    keys requested. If a resource is properly unsubscribed,
+                    True must be retrieved; otherwise, the Exception that is
+                    raised during the processing must be retrieved.
        """
        raise NotImplementedError()

    def GetState(
        self, blocking=False, terminate : Optional[threading.Event] = None
    ) -> Iterator[Tuple[float, str, Any]]:
-    """ Retrieve last collected values for subscribed resources. Operates as a generator, so this method should be
-        called once and will block until values are available. When values are available, it should yield each of
-        them and block again until new values are available.
-        When the driver is destroyed, GetState() can return
-        instead of yield to terminate the loop. Terminate enables to request interruption of the generation.
+    """ Retrieve last collected values for subscribed resources.
+        Operates as a generator, so this method should be called once and will
+        block until values are available. When values are available,
+        it should yield each of them and block again until new values are
+        available. When the driver is destroyed, GetState() can return instead
+        of yield to terminate the loop.
+        The terminate event allows requesting interruption of the generation.
         Examples:
             # keep looping waiting for extra samples (generator loop)
             terminate = threading.Event()
@@ -161,20 +193,27 @@ class _Driver:
             if i == 10: terminate.set()
         Parameters:
             blocking : bool
-                Select the driver behaviour. In both cases, the driver will first retrieve the samples accumulated
-                and available in the internal queue. Then, if blocking, the driver does not terminate the loop and
-                waits for additional samples to come, thus behaving as a generator. If non-blocking, the driver
-                terminates the loop and returns. Non-blocking behaviour can be used for periodically polling the
-                driver, while blocking can be used when a separate thread is in charge of collecting the samples
-                produced by the driver.
+                Select the driver behaviour. In both cases, the driver will
+                first retrieve the samples accumulated and available in the
+                internal queue. Then, if blocking, the driver does not
+                terminate the loop and waits for additional samples to come,
+                thus behaving as a generator. If non-blocking, the driver
+                terminates the loop and returns. Non-blocking behaviour can
+                be used for periodically polling the driver, while blocking
+                can be used when a separate thread is in charge of
+                collecting the samples produced by the driver.
             terminate : threading.Event
-                Signals the interruption of the GetState method as soon as possible.
+                Signals the interruption of the GetState method as soon as
+                possible.
         Returns:
             results : Iterator[Tuple[float, str, Any]]
-                Sequences of state sample. Each State sample contains a float Unix-like timestamps of the samples in
-                seconds with up to microsecond resolution, the resource_key of the sample, and its resource_value.
-                Only resources with an active subscription must be retrieved. Interval and duration of the sampling
-                process are specified when creating the subscription using method SubscribeState(). Order of values
-                yielded is arbitrary.
+                Sequence of state samples. Each state sample contains a
+                float Unix-like timestamp of the sample in seconds with up
+                to microsecond resolution, the resource_key of the sample,
+                and its resource_value.
+                Only resources with an active subscription must be
+                retrieved. Interval and duration of the sampling process are
+                specified when creating the subscription using method
+                SubscribeState(). Order of values yielded is arbitrary.
""" raise NotImplementedError() diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index dd41096ec25fb74f1b1b855c98f90e09fee33194..9342e650b9fadb21fa1b65fb951a08ae6f066a3c 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -61,11 +61,13 @@ class NetconfSessionHandler: self.__port = int(port) self.__username = settings.get('username') self.__password = settings.get('password') + self.__vendor = settings.get('vendor') self.__key_filename = settings.get('key_filename') self.__hostkey_verify = settings.get('hostkey_verify', True) self.__look_for_keys = settings.get('look_for_keys', True) self.__allow_agent = settings.get('allow_agent', True) self.__force_running = settings.get('force_running', False) + self.__commit_per_delete = settings.get('delete_rule', False) self.__device_params = settings.get('device_params', {}) self.__manager_params = settings.get('manager_params', {}) self.__nc_params = settings.get('nc_params', {}) @@ -90,6 +92,12 @@ class NetconfSessionHandler: @property def use_candidate(self): return self.__candidate_supported and not self.__force_running + @property + def commit_per_rule(self): return self.__commit_per_delete + + @property + def vendor(self): return self.__vendor + @RETRY_DECORATOR def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin with self.__lock: @@ -181,8 +189,9 @@ def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples : LOGGER.exception('Error retrieving samples') def edit_config( - netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, target='running', - default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin + netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False, + target='running', default_operation='merge', test_option=None, error_option=None, + format='xml' # pylint: disable=redefined-builtin ): str_method = 'DeleteConfig' if delete else 'SetConfig' LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources))) @@ -195,13 +204,16 @@ def edit_config( chk_length(str_resource_name, resource, min_length=2, max_length=2) resource_key,resource_value = resource chk_string(str_resource_name + '.key', resource_key, allow_empty=False) - str_config_message = compose_config(resource_key, resource_value, delete=delete) + str_config_message = compose_config( + resource_key, resource_value, delete=delete, vendor=netconf_handler.vendor) if str_config_message is None: raise UnsupportedResourceKeyException(resource_key) LOGGER.info('[{:s}] str_config_message[{:d}] = {:s}'.format( str_method, len(str_config_message), str(str_config_message))) netconf_handler.edit_config( config=str_config_message, target=target, default_operation=default_operation, test_option=test_option, error_option=error_option, format=format) + if commit_per_rule: + netconf_handler.commit() results[i] = True except Exception as e: # pylint: disable=broad-except str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting') @@ -278,12 +290,15 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - results = edit_config(self.__netconf_handler, resources, target='candidate') - try: - 
self.__netconf_handler.commit()
-                    except Exception as e: # pylint: disable=broad-except
-                        LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources)))
-                        results = [e for _ in resources] # if commit fails, set exception in each resource
+                    if self.__netconf_handler.commit_per_rule:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate', commit_per_rule=True)
+                    else:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate')
+                        try:
+                            self.__netconf_handler.commit()
+                        except Exception as e: # pylint: disable=broad-except
+                            LOGGER.exception('[SetConfig] Exception committing resources: {:s}'.format(str(resources)))
+                            results = [e for _ in resources] # if commit fails, set exception in each resource
            else:
                results = edit_config(self.__netconf_handler, resources)
        return results
@@ -294,12 +309,15 @@ class OpenConfigDriver(_Driver):
        with self.__lock:
            if self.__netconf_handler.use_candidate:
                with self.__netconf_handler.locked(target='candidate'):
-                    results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True)
-                    try:
-                        self.__netconf_handler.commit()
-                    except Exception as e: # pylint: disable=broad-except
-                        LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources)))
-                        results = [e for _ in resources] # if commit fails, set exception in each resource
+                    if self.__netconf_handler.commit_per_rule:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule=True)
+                    else:
+                        results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True)
+                        try:
+                            self.__netconf_handler.commit()
+                        except Exception as e: # pylint: disable=broad-except
+                            LOGGER.exception('[DeleteConfig] Exception committing resources: {:s}'.format(str(resources)))
+                            results = [e for _ in resources] # if commit fails, set exception in each resource
            else:
                results = edit_config(self.__netconf_handler, resources, delete=True)
        return results
diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py
index c11b1669d5b4cf3ca47986817ded28f75ae8358f..718a02d193531924bef863f5ccd2cbb999388dbd 100644
--- a/src/device/service/drivers/openconfig/templates/EndPoints.py
+++ b/src/device/service/drivers/openconfig/templates/EndPoints.py
@@ -20,7 +20,7 @@ from .Tools import add_value_from_collection, add_value_from_tag
 
 LOGGER = logging.getLogger(__name__)
 
-XPATH_PORTS = "//ocp:components/ocp:component/ocp:state[ocp:type='PORT']/.."
+XPATH_PORTS = "//ocp:components/ocp:component" XPATH_IFACE_COUNTER = "//oci:interfaces/oci:interface[oci:name='{:s}']/state/counters/{:s}" def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: @@ -28,6 +28,13 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: for xml_component in xml_data.xpath(XPATH_PORTS, namespaces=NAMESPACES): #LOGGER.info('xml_component = {:s}'.format(str(ET.tostring(xml_component)))) + component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES) + if component_type is None or component_type.text is None: continue + component_type = component_type.text + if component_type not in {'PORT', 'oc-platform-types:PORT'}: continue + + LOGGER.info('PORT xml_component = {:s}'.format(str(ET.tostring(xml_component)))) + endpoint = {} component_name = xml_component.find('ocp:name', namespaces=NAMESPACES) diff --git a/src/device/service/drivers/openconfig/templates/Interfaces.py b/src/device/service/drivers/openconfig/templates/Interfaces.py index 33f977524c6f65655fbe17f6d2d95a7cfc223967..3f5b104f2de01137c2424e776dc60b8416088de6 100644 --- a/src/device/service/drivers/openconfig/templates/Interfaces.py +++ b/src/device/service/drivers/openconfig/templates/Interfaces.py @@ -37,6 +37,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: #interface_type = xml_interface.find('oci:config/oci:type', namespaces=NAMESPACES) #add_value_from_tag(interface, 'type', interface_type) + interface_type = xml_interface.find('oci:config/oci:type', namespaces=NAMESPACES) + interface_type.text = interface_type.text.replace('ianaift:','') + add_value_from_tag(interface, 'type', interface_type) + interface_mtu = xml_interface.find('oci:config/oci:mtu', namespaces=NAMESPACES) add_value_from_tag(interface, 'mtu', interface_mtu, cast=int) @@ -49,12 +53,15 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: subinterface = {} add_value_from_tag(subinterface, 'name', interface_name) + add_value_from_tag(subinterface, 'mtu', interface_mtu) + add_value_from_tag(subinterface, 'type', interface_type) + subinterface_index = xml_subinterface.find('oci:index', namespaces=NAMESPACES) if subinterface_index is None or subinterface_index.text is None: continue add_value_from_tag(subinterface, 'index', subinterface_index, cast=int) - vlan_id = xml_subinterface.find('ocv:vlan/ocv:config/ocv:vlan-id', namespaces=NAMESPACES) + vlan_id = xml_subinterface.find('ocv:vlan/ocv:match/ocv:single-tagged/ocv:config/ocv:vlan-id', namespaces=NAMESPACES) add_value_from_tag(subinterface, 'vlan_id', vlan_id, cast=int) # TODO: implement support for multiple IP addresses per subinterface diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py index b091a0d206195a6c2ce94008628071cd9e30944f..8399402fa76b8b6b00829493cc8ebd28fd6018f4 100644 --- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py +++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py @@ -27,6 +27,9 @@ XPATH_NI_IIP_AP = ".//ocni:inter-instance-policies/ocni:apply-policy" XPATH_NI_IIP_AP_IMPORT = ".//ocni:config/ocni:import-policy" XPATH_NI_IIP_AP_EXPORT = ".//ocni:config/ocni:export-policy" +XPATH_NI_CPOINTS = ".//ocni:connection-points/ocni:connection-point" +XPATH_NI_CPOINTS_ENDPOINT = ".//ocni:endpoints/ocni:endpoint/ocni:remote/ocni:config" + def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: response = [] for 
    for xml_network_instance in xml_data.xpath(XPATH_NETWORK_INSTANCES, namespaces=NAMESPACES):
@@ -39,10 +42,11 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         add_value_from_tag(network_instance, 'name', ni_name)
 
         ni_type = xml_network_instance.find('ocni:config/ocni:type', namespaces=NAMESPACES)
+        # guard against network instances that do not report a config/type field
+        if ni_type is not None and ni_type.text is not None:
+            ni_type.text = ni_type.text.replace('oc-ni-types:','')
         add_value_from_tag(network_instance, 'type', ni_type)
 
-        #ni_router_id = xml_network_instance.find('ocni:config/ocni:router-id', namespaces=NAMESPACES)
-        #add_value_from_tag(network_instance, 'router_id', ni_router_id)
+        ni_router_id = xml_network_instance.find('ocni:config/ocni:router-id', namespaces=NAMESPACES)
+        add_value_from_tag(network_instance, 'router_id', ni_router_id)
 
         ni_route_dist = xml_network_instance.find('ocni:config/ocni:route-distinguisher', namespaces=NAMESPACES)
         add_value_from_tag(network_instance, 'route_distinguisher', ni_route_dist)
@@ -53,6 +57,20 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
 
         if len(network_instance) == 0: continue
         response.append(('/network_instance[{:s}]'.format(network_instance['name']), network_instance))
 
+        # iterate the connection points (XPATH_NI_CPOINTS, defined above;
+        # iterating XPATH_NI_PROTOCOLS here would walk the wrong subtree)
+        for xml_cpoints in xml_network_instance.xpath(XPATH_NI_CPOINTS, namespaces=NAMESPACES):
+            cpoint = {}
+            add_value_from_tag(cpoint, 'name', ni_name)
+
+            connection_point = xml_cpoints.find('ocni:connection-point-id', namespaces=NAMESPACES)
+            add_value_from_tag(cpoint, 'connection_point', connection_point)
+
+            for xml_endpoint in xml_cpoints.xpath(XPATH_NI_CPOINTS_ENDPOINT, namespaces=NAMESPACES):
+                remote_system = xml_endpoint.find('ocni:remote-system', namespaces=NAMESPACES)
+                add_value_from_tag(cpoint, 'remote_system', remote_system)
+
+                VC_ID = xml_endpoint.find('ocni:virtual-circuit-identifier', namespaces=NAMESPACES)
+                add_value_from_tag(cpoint, 'VC_ID', VC_ID)
+
+            # store the parsed connection point; without this append the
+            # dictionary built above would be discarded (resource-key format
+            # assumed consistent with the entries above)
+            if 'connection_point' in cpoint:
+                response.append(('/network_instance[{:s}]/connection_point[{:s}]'.format(
+                    network_instance['name'], cpoint['connection_point']), cpoint))
+
         for xml_protocol in xml_network_instance.xpath(XPATH_NI_PROTOCOLS, namespaces=NAMESPACES):
             #LOGGER.info('xml_protocol = {:s}'.format(str(ET.tostring(xml_protocol))))
@@ -71,6 +89,8 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
             if protocol['identifier'] == 'BGP':
                 bgp_as = xml_protocol.find('ocni:bgp/ocni:global/ocni:config/ocni:as', namespaces=NAMESPACES)
                 add_value_from_tag(protocol, 'as', bgp_as, cast=int)
+                bgp_id = xml_protocol.find('ocni:bgp/ocni:global/ocni:config/ocni:router-id', namespaces=NAMESPACES)
+                add_value_from_tag(protocol, 'router_id', bgp_id)
 
             resource_key = '/network_instance[{:s}]/protocols[{:s}]'.format(
                 network_instance['name'], protocol['identifier'])
@@ -94,7 +114,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
             add_value_from_tag(table_connection, 'address_family', address_family,
                 cast=lambda s: s.replace('oc-types:', ''))
 
-            default_import_policy = xml_table_connection.find('ocni:default-import-policy', namespaces=NAMESPACES)
+            default_import_policy = xml_table_connection.find('ocni:config/ocni:default-import-policy', namespaces=NAMESPACES)
             add_value_from_tag(table_connection, 'default_import_policy', default_import_policy)
 
             resource_key = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format(
@@ -125,4 +145,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
                 iip_ap['name'], iip_ap['export_policy'])
             response.append((resource_key, iip_ap))
 
+
     return response
diff --git a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py
index
369732de3fe58c52a2e9ab2227899160d091ff68..068ca5430d9135e784dbe9a07f80d81472cbf5cc 100644 --- a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py +++ b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py @@ -74,7 +74,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: resource_key = '/routing_policy/bgp_defined_set[{:s}]'.format(bgp_ext_community_set['ext_community_set_name']) response.append((resource_key, copy.deepcopy(bgp_ext_community_set))) - ext_community_member = xml_bgp_ext_community_set.find('ocbp:ext-community-member', namespaces=NAMESPACES) + ext_community_member = xml_bgp_ext_community_set.find('ocbp:config/ocbp:ext-community-member', namespaces=NAMESPACES) if ext_community_member is not None and ext_community_member.text is not None: add_value_from_tag(bgp_ext_community_set, 'ext_community_member', ext_community_member) diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py index 901f5cf0291dca1bda155e20abd16db5989df7dc..5e77b25fe3206407db9427085de70b95342d370a 100644 --- a/src/device/service/drivers/openconfig/templates/__init__.py +++ b/src/device/service/drivers/openconfig/templates/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. import json, logging, lxml.etree as ET, re -from typing import Any, Dict +from typing import Any, Dict, Optional from jinja2 import Environment, PackageLoader, select_autoescape from device.service.driver_api._Driver import ( RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_ACL) @@ -77,9 +77,11 @@ def parse(resource_key : str, xml_data : ET.Element): if parser is None: return [(resource_key, xml_data)] return parser(xml_data) -def compose_config(resource_key : str, resource_value : str, delete : bool = False) -> str: +def compose_config( + resource_key : str, resource_value : str, delete : bool = False, vendor : Optional[str] = None +) -> str: template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key)) template = JINJA_ENV.get_template(template_name) data : Dict[str, Any] = json.loads(resource_value) operation = 'delete' if delete else 'merge' - return '{:s}'.format(template.render(**data, operation=operation).strip()) + return '{:s}'.format(template.render(**data, operation=operation, vendor=vendor).strip()) diff --git a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml index fac259b6fdcd3cbded93088ddc6335ea2bfe5f69..2769e8b2e9f81326332ae175f915432b7337f24c 100644 --- a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml @@ -13,6 +13,16 @@ {{sequence_id}} + {% if operation is not defined or operation != 'delete' %} + {% if type=='ACL_L2' %} + + + {% if source_address is defined %}{{source_address}}{% endif%} + {% if destination_address is defined %}{{destination_address}}{% endif%} + + + {% endif%} + {% if type=='ACL_IPV4' %} {% if source_address is defined %}{{source_address}}{% endif%} @@ -29,12 +39,26 @@ {% if tcp_flags is defined %}{{tcp_flags}}{% endif%} + {% endif%} + {% if type=='ACL_IPV6' %} + + + {% if source_address is defined %}{{source_address}}{% endif%} + {% if destination_address is defined %}{{destination_address}}{% endif%} + {% if protocol is defined 
%}{{protocol}}{% endif%} + {% if dscp is defined %}{{dscp}}{% endif%} + {% if hop_limit is defined %}{{hop_limit}}{% endif%} + + + {% endif%} + {% if forwarding_action is defined %}{{forwarding_action}}{% endif%} {% if log_action is defined %}{{log_action}}{% endif%} + {% endif%} diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml index d987b0cc4b40298533f140f71af83c6fad884020..b070b305a505890c51f3751d2b83eb415ae4aa43 100644 --- a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml @@ -1,18 +1,21 @@ - + {{id}} {{id}} + {% if interface is defined %} {{interface}} {% if subinterface is defined %}{{subinterface}}{% endif%} + {% endif%} + {% if set_name_egress is defined %} - + > {{set_name_egress}} {{type_egress}} @@ -21,6 +24,7 @@ + {% endif%} diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml index 144a03c55477e532379541be5443063fe3aa2f10..d1f18efb26bc1316354c2bb26623cb36f7dc0be6 100644 --- a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml @@ -1,18 +1,21 @@ - + {{id}} {{id}} + {% if interface is defined %} {{interface}} {% if subinterface is defined %}{{subinterface}}{% endif%} + {% endif%} + {% if set_name_ingress is defined %} - + {{set_name_ingress}} {{type_ingress}} @@ -21,6 +24,7 @@ + {% endif%} diff --git a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml index ff15d1d682ea910208237c32adcc93029fb036d8..4bc53ff1ddfbebbdcef2a0b4c37770210726676b 100644 --- a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml @@ -1,14 +1,12 @@ - + {{name}} + {% if operation is defined and operation != 'delete' %} {{name}} - {% if operation is defined and operation == 'delete' %} - {% else %} - {{description}} {{mtu}} - {% endif %} + {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml index d266f819c41355ba8a30086415f2bba3b68f1f3d..1bdb8efbff495f04ee90dadaffaa7412332531b7 100644 --- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml @@ -1,35 +1,46 @@ - + {{name}} - {% if operation is not defined or operation != 'delete' %} {{name}} + ianaift:{{type}} + {% if mtu is defined %}{{mtu}}{% endif%} + true - {% endif %} - + {{index}} - {% if operation is not defined or operation != 'delete' %} {{index}} - true + {{description}} + {% if vendor=="ADVA" and vlan_id is not defined %} + true + {% endif%} + {% if vlan_id is defined %} - - {{vlan_id}} - - - - -
- {{address_ip}} + + - {{address_ip}} - {{address_prefix}} + {{vlan_id}} -
-
-
+ + + + {% endif %} + {% if address_ip is defined %} + + + + {{address_ip}} + + {{address_ip}} + {{address_prefix}} + + + + {% endif %}
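A note on usage: the templates in this directory are selected and rendered by
compose_config() (templates/__init__.py, earlier in this patch). A minimal
sketch of a call, with a hypothetical resource key and payload; the vendor
argument switches the ADVA-specific branches of the subinterface template:

    import json
    from device.service.drivers.openconfig.templates import compose_config

    xml_request = compose_config(
        '/interface[eth0]/subinterface[0]',
        json.dumps({'name': 'eth0', 'index': 0, 'description': 'uplink'}),
        delete=False, vendor='ADVA')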
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml new file mode 100644 index 0000000000000000000000000000000000000000..60272e5fba4dd87c9bc48ef596197c2508b75e59 --- /dev/null +++ b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml @@ -0,0 +1,29 @@ + + + {{name}} + + + {{connection_point}} + + {{connection_point}} + + + + {{connection_point}} + + {{connection_point}} + 1 + oc-ni-types:REMOTE + + + + {{VC_ID}} + {{remote_system}} + + + + + + + + diff --git a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml index 9362c09c6cfebcd1f83b05002f58eda51724b911..17b07df7233e94f16923c5da49eef2b8b5ccda82 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml @@ -5,7 +5,8 @@ {{name}} oc-ni-types:{{type}} - {{description}} + {% if type=='L3VRF' %} + {% if description is defined %}{{description}}{% endif %} {% if router_id is defined %}{{router_id}}{% endif %} {{route_distinguisher}} true @@ -13,8 +14,29 @@ oc-ni-types:MPLS + oc-ni-types:INSTANCE_LABEL + {% endif %} + {% if type=='L2VSI' %} + {% if description is defined %}{{description}}{% endif %} + true + 1500 + + + + oc-ni-types:MPLS + + + + + true + 1000 + 300 + + + {% endif %} + {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml index d5c33d31a6d671216db55c0eded94dc15a56bec8..bf8c0c0770f9344fbed16f3a6b09f7fa99a978ef 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml @@ -2,15 +2,13 @@ {{name}} - + {{id}} - {% if operation is not defined or operation != 'delete' %} {{id}} {{interface}} {{subinterface}} - {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml index da05d0467605e6cec0c3448cc325ff60dfc7cfc9..c9c068e480c0569cfe5f97b78b28fbe03e2595f8 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml @@ -3,19 +3,19 @@ {{name}} - {{identifier}} + oc-pol-types:{{identifier}} {{protocol_name}} {% if operation is not defined or operation != 'delete' %} - {{identifier}} + oc-pol-types:{{identifier}} {{protocol_name}} - true {% if identifier=='BGP' %} {{as}} + {{router_id}} @@ -23,5 +23,18 @@ {% endif %} + {% if operation is not defined or operation != 'delete' %} + + + + oc-pol-types:{{identifier}} + oc-types:IPV4 + + oc-pol-types:{{identifier}} + oc-types:IPV4 + +
+
+ {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml index df64606ae5ab434e5e3453f7294db02bb749bdce..6843c2dcbd306b149a4168565447d11174eceadc 100644 --- a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml @@ -5,7 +5,10 @@ {{ext_community_set_name}} {% if operation is not defined or operation != 'delete' %} - {% if ext_community_member is defined %} {{ext_community_member}}{% endif %} + + {{ext_community_set_name}} + {{ext_community_member}} + {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml index 711067f424b68da0e69913ce01f5133c5cbbfe02..eda2d99c9f6299f7345767db8bed8e8cc58284ae 100644 --- a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml @@ -1,8 +1,11 @@ -{% if operation is not defined or operation != 'delete' %} - + {{policy_name}} + {% if operation is not defined or operation != 'delete' %} + + {{policy_name}} + {{statement_name}} @@ -10,11 +13,13 @@ {{statement_name}} + + oc-pol-types:DIRECTLY_CONNECTED + - + {{ext_community_set_name}} - {{match_set_options}} - + @@ -24,7 +29,7 @@ + {% endif %} -{% endif %} diff --git a/src/device/service/drivers/p4/__init__.py b/src/device/service/drivers/p4/__init__.py index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644 --- a/src/device/service/drivers/p4/__init__.py +++ b/src/device/service/drivers/p4/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/src/device/service/drivers/p4/p4_client.py b/src/device/service/drivers/p4/p4_client.py new file mode 100644 index 0000000000000000000000000000000000000000..600d08880c7e8a1d6a7238e60d66a87d7167bd8c --- /dev/null +++ b/src/device/service/drivers/p4/p4_client.py @@ -0,0 +1,607 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4Runtime client. 
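+
+Example (sketch; address and election ID are placeholders):
+    client = P4RuntimeClient(
+        device_id=1, grpc_address="localhost:9559", election_id=(0, 1))
+    print(client.api_version())
+    client.tear_down()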
+""" + +import logging +import queue +import sys +import enum +import threading +from functools import wraps +from typing import NamedTuple +import grpc +import google.protobuf.text_format +from google.rpc import status_pb2, code_pb2 + +from p4.v1 import p4runtime_pb2 +from p4.v1 import p4runtime_pb2_grpc + +STREAM_ATTR_ARBITRATION = "arbitration" +STREAM_ATTR_PACKET = "packet" +STREAM_ATTR_DIGEST = "digest" +STREAM_ATTR_IDLE_NOT = "idle_timeout_notification" +STREAM_ATTR_UNKNOWN = "unknown" + +LOGGER = logging.getLogger(__name__) + + +class P4RuntimeErrorFormatException(Exception): + """ + P4Runtime error format exception. + """ + + +# Used to iterate over the p4.Error messages in a gRPC error Status object +class P4RuntimeErrorIterator: + """ + P4Runtime error iterator. + + Attributes + ---------- + grpc_error : object + gRPC error + """ + + def __init__(self, grpc_error): + assert grpc_error.code() == grpc.StatusCode.UNKNOWN + self.grpc_error = grpc_error + + error = None + # The gRPC Python package does not have a convenient way to access the + # binary details for the error: they are treated as trailing metadata. + for meta in self.grpc_error.trailing_metadata(): + if meta[0] == "grpc-status-details-bin": + error = status_pb2.Status() + error.ParseFromString(meta[1]) + break + if error is None: + raise P4RuntimeErrorFormatException("No binary details field") + + if len(error.details) == 0: + raise P4RuntimeErrorFormatException( + "Binary details field has empty Any details repeated field") + self.errors = error.details + self.idx = 0 + + def __iter__(self): + return self + + def __next__(self): + while self.idx < len(self.errors): + p4_error = p4runtime_pb2.Error() + one_error_any = self.errors[self.idx] + if not one_error_any.Unpack(p4_error): + raise P4RuntimeErrorFormatException( + "Cannot convert Any message to p4.Error") + if p4_error.canonical_code == code_pb2.OK: + continue + val = self.idx, p4_error + self.idx += 1 + return val + raise StopIteration + + +class P4RuntimeWriteException(Exception): + """ + P4Runtime write exception handler. + + Attributes + ---------- + grpc_error : object + gRPC error + """ + + def __init__(self, grpc_error): + assert grpc_error.code() == grpc.StatusCode.UNKNOWN + super().__init__() + self.errors = [] + try: + error_iterator = P4RuntimeErrorIterator(grpc_error) + for error_tuple in error_iterator: + self.errors.append(error_tuple) + except P4RuntimeErrorFormatException as ex: + raise P4RuntimeException(grpc_error) from ex + + def __str__(self): + message = "Error(s) during Write:\n" + for idx, p4_error in self.errors: + code_name = code_pb2._CODE.values_by_number[ + p4_error.canonical_code].name + message += f"\t* At index {idx}: {code_name}, " \ + f"'{p4_error.message}'\n" + return message + + +class P4RuntimeException(Exception): + """ + P4Runtime exception handler. + + Attributes + ---------- + grpc_error : object + gRPC error + """ + + def __init__(self, grpc_error): + super().__init__() + self.grpc_error = grpc_error + + def __str__(self): + message = f"P4Runtime RPC error ({self.grpc_error.code().name}): " \ + f"{self.grpc_error.details()}" + return message + + +def parse_p4runtime_write_error(func): + """ + Parse P4Runtime write error. 
+ + :param func: function + :return: parsed error + """ + + @wraps(func) + def handle(*args, **kwargs): + try: + return func(*args, **kwargs) + except grpc.RpcError as ex: + if ex.code() != grpc.StatusCode.UNKNOWN: + raise ex + raise P4RuntimeWriteException(ex) from None + + return handle + + +def parse_p4runtime_error(func): + """ + Parse P4Runtime error. + + :param func: function + :return: parsed error + """ + + @wraps(func) + def handle(*args, **kwargs): + try: + return func(*args, **kwargs) + except grpc.RpcError as ex: + raise P4RuntimeException(ex) from None + + return handle + + +class SSLOptions(NamedTuple): + """ + Tuple of SSL options. + """ + insecure: bool + cacert: str = None + cert: str = None + key: str = None + + +def read_pem_file(path): + """ + Load and read PEM file. + + :param path: path to PEM file + :return: file descriptor + """ + try: + with open(path, "rb") as f_d: + return f_d.read() + except (FileNotFoundError, IOError, OSError): + logging.critical("Cannot read from PEM file '%s'", path) + sys.exit(1) + + +@enum.unique +class WriteOperation(enum.Enum): + """ + Write Operations. + """ + insert = 1 + update = 2 + delete = 3 + + +def select_operation(mode): + """ + Select P4 operation based upon the operation mode. + + :param mode: operation mode + :return: P4 operation protobuf object + """ + if mode == WriteOperation.insert: + return p4runtime_pb2.Update.INSERT + if mode == WriteOperation.update: + return p4runtime_pb2.Update.UPDATE + if mode == WriteOperation.delete: + return p4runtime_pb2.Update.DELETE + return None + + +def select_entity_type(entity, update): + """ + Select P4 entity type for an update. + + :param entity: P4 entity object + :param update: update operation + :return: the correct update entity or None + """ + if isinstance(entity, p4runtime_pb2.TableEntry): + return update.entity.table_entry + if isinstance(entity, p4runtime_pb2.ActionProfileGroup): + return update.entity.action_profile_group + if isinstance(entity, p4runtime_pb2.ActionProfileMember): + return update.entity.action_profile_member + return None + + +class P4RuntimeClient: + """ + P4Runtime client. + + Attributes + ---------- + device_id : int + P4 device ID + grpc_address : str + IP address and port + election_id : tuple + Mastership election ID + role_name : str + Role name (optional) + ssl_options: tuple + SSL options" named tuple (optional) + """ + + def __init__(self, device_id, grpc_address, + election_id, role_name=None, ssl_options=None): + self.device_id = device_id + self.election_id = election_id + self.role_name = role_name + if ssl_options is None: + self.ssl_options = SSLOptions(True) + else: + self.ssl_options = ssl_options + LOGGER.debug( + "Connecting to device %d at %s", device_id, grpc_address) + + if self.ssl_options.insecure: + logging.debug("Using insecure channel") + self.channel = grpc.insecure_channel(grpc_address) + else: + # root certificates are retrieved from a default location + # chosen by gRPC runtime unless the user provides + # custom certificates. 
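+            # Example (sketch): SSLOptions(False, "ca.pem", "client.pem",
+            # "client.key") enables mutual TLS, while SSLOptions(True)
+            # selects the insecure channel branch above.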
+            root_certificates = None
+            if self.ssl_options.cacert is not None:
+                root_certificates = read_pem_file(self.ssl_options.cacert)
+            certificate_chain = None
+            if self.ssl_options.cert is not None:
+                certificate_chain = read_pem_file(self.ssl_options.cert)
+            private_key = None
+            if self.ssl_options.key is not None:
+                private_key = read_pem_file(self.ssl_options.key)
+            creds = grpc.ssl_channel_credentials(root_certificates, private_key,
+                                                 certificate_chain)
+            self.channel = grpc.secure_channel(grpc_address, creds)
+        self.stream_in_q = None
+        self.stream_out_q = None
+        self.stream = None
+        self.stream_recv_thread = None
+        self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel)
+
+        try:
+            self.set_up_stream()
+        except P4RuntimeException:
+            LOGGER.critical("Failed to connect to P4Runtime server")
+            sys.exit(1)
+        LOGGER.info("P4Runtime client is successfully invoked")
+
+    def set_up_stream(self):
+        """
+        Set up a gRPC stream.
+        """
+        self.stream_out_q = queue.Queue()
+        # queues for different messages
+        self.stream_in_q = {
+            STREAM_ATTR_ARBITRATION: queue.Queue(),
+            STREAM_ATTR_PACKET: queue.Queue(),
+            STREAM_ATTR_DIGEST: queue.Queue(),
+            STREAM_ATTR_IDLE_NOT: queue.Queue(),
+            STREAM_ATTR_UNKNOWN: queue.Queue(),
+        }
+
+        def stream_req_iterator():
+            while True:
+                stream_p = self.stream_out_q.get()
+                if stream_p is None:
+                    break
+                yield stream_p
+
+        def stream_recv_wrapper(stream):
+            @parse_p4runtime_error
+            def stream_recv():
+                for stream_p in stream:
+                    if stream_p.HasField("arbitration"):
+                        self.stream_in_q["arbitration"].put(stream_p)
+                    elif stream_p.HasField("packet"):
+                        self.stream_in_q["packet"].put(stream_p)
+                    elif stream_p.HasField("digest"):
+                        self.stream_in_q["digest"].put(stream_p)
+                    else:
+                        self.stream_in_q["unknown"].put(stream_p)
+
+            try:
+                stream_recv()
+            except P4RuntimeException as ex:
+                logging.critical("StreamChannel error, closing stream")
+                logging.critical(ex)
+                for k in self.stream_in_q:
+                    self.stream_in_q[k].put(None)
+
+        self.stream = self.stub.StreamChannel(stream_req_iterator())
+        self.stream_recv_thread = threading.Thread(
+            target=stream_recv_wrapper, args=(self.stream,))
+        self.stream_recv_thread.start()
+        self.handshake()
+
+    def handshake(self):
+        """
+        Handshake with gRPC server.
+        """
+
+        req = p4runtime_pb2.StreamMessageRequest()
+        arbitration = req.arbitration
+        arbitration.device_id = self.device_id
+        election_id = arbitration.election_id
+        election_id.high = self.election_id[0]
+        election_id.low = self.election_id[1]
+        if self.role_name is not None:
+            arbitration.role.name = self.role_name
+        self.stream_out_q.put(req)
+
+        rep = self.get_stream_packet(STREAM_ATTR_ARBITRATION, timeout=2)
+        if rep is None:
+            logging.critical("Failed to establish session with server")
+            sys.exit(1)
+        is_primary = (rep.arbitration.status.code == code_pb2.OK)
+        logging.debug("Session established, client is '%s'",
+                      "primary" if is_primary else "backup")
+        if not is_primary:
+            print("You are not the primary client, "
+                  "you only have read access to the server")
+
+    def get_stream_packet(self, type_, timeout=1):
+        """
+        Get a new message from the stream.
+
+        :param type_: stream type.
+        :param timeout: time to wait.
+        :return: message or None
+        """
+        if type_ not in self.stream_in_q:
+            print(f"Unknown stream type '{type_}'")
+            return None
+        try:
+            msg = self.stream_in_q[type_].get(timeout=timeout)
+            return msg
+        except queue.Empty:  # timeout expired
+            return None
+
+    @parse_p4runtime_error
+    def get_p4info(self):
+        """
+        Retrieve P4Info content.
+
+        :return: P4Info object.
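+
+        Example (sketch):
+            p4info = client.get_p4info()
+            for table in p4info.tables:
+                print(table.preamble.name)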
+ """ + logging.debug("Retrieving P4Info file") + req = p4runtime_pb2.GetForwardingPipelineConfigRequest() + req.device_id = self.device_id + req.response_type = \ + p4runtime_pb2.GetForwardingPipelineConfigRequest.P4INFO_AND_COOKIE + rep = self.stub.GetForwardingPipelineConfig(req) + return rep.config.p4info + + @parse_p4runtime_error + def set_fwd_pipe_config(self, p4info_path, bin_path): + """ + Configure the pipeline. + + :param p4info_path: path to the P4Info file + :param bin_path: path to the binary file + :return: + """ + logging.debug("Setting forwarding pipeline config") + req = p4runtime_pb2.SetForwardingPipelineConfigRequest() + req.device_id = self.device_id + if self.role_name is not None: + req.role = self.role_name + election_id = req.election_id + election_id.high = self.election_id[0] + election_id.low = self.election_id[1] + req.action = \ + p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT + with open(p4info_path, "r", encoding="utf-8") as f_info: + with open(bin_path, "rb") as f_bin: + try: + google.protobuf.text_format.Merge( + f_info.read(), req.config.p4info) + except google.protobuf.text_format.ParseError: + logging.error("Error when parsing P4Info") + raise + req.config.p4_device_config = f_bin.read() + return self.stub.SetForwardingPipelineConfig(req) + + def tear_down(self): + """ + Tear connection with the gRPC server down. + """ + if self.stream_out_q: + logging.debug("Cleaning up stream") + self.stream_out_q.put(None) + if self.stream_in_q: + for k in self.stream_in_q: + self.stream_in_q[k].put(None) + if self.stream_recv_thread: + self.stream_recv_thread.join() + self.channel.close() + # avoid a race condition if channel deleted when process terminates + del self.channel + + @parse_p4runtime_write_error + def __write(self, entity, mode=WriteOperation.insert): + """ + Perform a write operation. + + :param entity: P4 entity to write + :param mode: operation mode (defaults to insert) + :return: void + """ + if isinstance(entity, (list, tuple)): + for ent in entity: + self.__write(ent) + return + req = self.__get_new_write_request() + update = req.updates.add() + update.type = select_operation(mode) + msg_entity = select_entity_type(entity, update) + if not msg_entity: + msg = f"{mode.name} operation for entity {entity.__name__}" \ + f"not supported" + raise P4RuntimeWriteException(msg) + msg_entity.CopyFrom(entity) + self.__simple_write(req) + + def __get_new_write_request(self): + """ + Create a new write request message. + + :return: write request message + """ + req = p4runtime_pb2.WriteRequest() + req.device_id = self.device_id + if self.role_name is not None: + req.role = self.role_name + election_id = req.election_id + election_id.high = self.election_id[0] + election_id.low = self.election_id[1] + return req + + @parse_p4runtime_write_error + def __simple_write(self, req): + """ + Send a write operation into the wire. + + :param req: write operation request + :return: void + """ + try: + return self.stub.Write(req) + except grpc.RpcError as ex: + if ex.code() != grpc.StatusCode.UNKNOWN: + raise ex + raise P4RuntimeWriteException(ex) from ex + + @parse_p4runtime_write_error + def insert(self, entity): + """ + Perform an insert write operation. + + :param entity: P4 entity to insert + :return: void + """ + return self.__write(entity, WriteOperation.insert) + + @parse_p4runtime_write_error + def update(self, entity): + """ + Perform an update write operation. 
+ + :param entity: P4 entity to update + :return: void + """ + return self.__write(entity, WriteOperation.update) + + @parse_p4runtime_write_error + def delete(self, entity): + """ + Perform a delete write operation. + + :param entity: P4 entity to delete + :return: void + """ + return self.__write(entity, WriteOperation.delete) + + @parse_p4runtime_write_error + def write(self, req): + """ + Write device operation. + + :param req: write request message + :return: status + """ + req.device_id = self.device_id + if self.role_name is not None: + req.role = self.role_name + election_id = req.election_id + election_id.high = self.election_id[0] + election_id.low = self.election_id[1] + return self.__simple_write(req) + + @parse_p4runtime_write_error + def write_update(self, update): + """ + Update device operation. + + :param update: update request message + :return: status + """ + req = self.__get_new_write_request() + req.updates.extend([update]) + return self.__simple_write(req) + + # Decorator is useless here: in case of server error, + # the exception is raised during the iteration (when next() is called). + @parse_p4runtime_error + def read_one(self, entity): + """ + Read device operation. + + :param entity: P4 entity for which the read is issued + :return: status + """ + req = p4runtime_pb2.ReadRequest() + if self.role_name is not None: + req.role = self.role_name + req.device_id = self.device_id + req.entities.extend([entity]) + return self.stub.Read(req) + + @parse_p4runtime_error + def api_version(self): + """ + P4Runtime API version. + + :return: API version hex + """ + req = p4runtime_pb2.CapabilitiesRequest() + rep = self.stub.Capabilities(req) + return rep.p4runtime_api_version diff --git a/src/device/service/drivers/p4/p4_common.py b/src/device/service/drivers/p4/p4_common.py new file mode 100644 index 0000000000000000000000000000000000000000..bcafedc1f613bfe1d1739d72f89803155b720155 --- /dev/null +++ b/src/device/service/drivers/p4/p4_common.py @@ -0,0 +1,445 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This package contains several helper functions for encoding to and decoding from +byte strings: +- integers +- IPv4 address strings +- IPv6 address strings +- Ethernet address strings +as well as static variables used by various P4 driver components. 
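+
+Example (sketch):
+    encode_mac("aa:bb:cc:dd:ee:ff")  # -> b'\xaa\xbb\xcc\xdd\xee\xff'
+    encode_ipv4("10.0.0.1")          # -> b'\n\x00\x00\x01'
+    encode_num(256, 16)              # -> b'\x01\x00'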
+""" + +import logging +import math +import re +import socket +import ipaddress +from ctypes import c_uint16, sizeof +import macaddress + +from common.type_checkers.Checkers import chk_type +try: + from .p4_exception import UserBadValueError +except ImportError: + from p4_exception import UserBadValueError + +P4_ATTR_DEV_ID = "id" +P4_ATTR_DEV_NAME = "name" +P4_ATTR_DEV_VENDOR = "vendor" +P4_ATTR_DEV_HW_VER = "hw_ver" +P4_ATTR_DEV_SW_VER = "sw_ver" +P4_ATTR_DEV_P4BIN = "p4bin" +P4_ATTR_DEV_P4INFO = "p4info" +P4_ATTR_DEV_TIMEOUT = "timeout" + +P4_VAL_DEF_VENDOR = "Unknown" +P4_VAL_DEF_HW_VER = "BMv2 simple_switch" +P4_VAL_DEF_SW_VER = "Stratum" +P4_VAL_DEF_TIMEOUT = 60 + + +# Logger instance +LOGGER = logging.getLogger(__name__) + + +# MAC address encoding/decoding +mac_pattern = re.compile(r"^([\da-fA-F]{2}:){5}([\da-fA-F]{2})$") + + +def matches_mac(mac_addr_string): + """ + Check whether input string is a valid MAC address or not. + + :param mac_addr_string: string-based MAC address + :return: boolean status + """ + return mac_pattern.match(mac_addr_string) is not None + + +def encode_mac(mac_addr_string): + """ + Convert string-based MAC address into bytes. + + :param mac_addr_string: string-based MAC address + :return: MAC address in bytes + """ + return bytes(macaddress.MAC(mac_addr_string)) + + +def decode_mac(encoded_mac_addr): + """ + Convert a MAC address in bytes into string-based MAC address. + + :param encoded_mac_addr: MAC address in bytes + :return: string-based MAC address + """ + return str(macaddress.MAC(encoded_mac_addr)).replace("-", ":").lower() + + +# IP address encoding/decoding +IPV4_LOCALHOST = "localhost" + + +def matches_ipv4(ip_addr_string): + """ + Check whether input string is a valid IPv4 address or not. + + :param ip_addr_string: string-based IPv4 address + :return: boolean status + """ + if ip_addr_string == IPV4_LOCALHOST: + return True + try: + addr = ipaddress.ip_address(ip_addr_string) + return isinstance(addr, ipaddress.IPv4Address) + except ValueError: + return False + + +def encode_ipv4(ip_addr_string): + """ + Convert string-based IPv4 address into bytes. + + :param ip_addr_string: string-based IPv4 address + :return: IPv4 address in bytes + """ + return socket.inet_aton(ip_addr_string) + + +def decode_ipv4(encoded_ip_addr): + """ + Convert an IPv4 address in bytes into string-based IPv4 address. + + :param encoded_ip_addr: IPv4 address in bytes + :return: string-based IPv4 address + """ + return socket.inet_ntoa(encoded_ip_addr) + + +def matches_ipv6(ip_addr_string): + """ + Check whether input string is a valid IPv6 address or not. + + :param ip_addr_string: string-based IPv6 address + :return: boolean status + """ + try: + addr = ipaddress.ip_address(ip_addr_string) + return isinstance(addr, ipaddress.IPv6Address) + except ValueError: + return False + + +def encode_ipv6(ip_addr_string): + """ + Convert string-based IPv6 address into bytes. + + :param ip_addr_string: string-based IPv6 address + :return: IPv6 address in bytes + """ + return socket.inet_pton(socket.AF_INET6, ip_addr_string) + + +def decode_ipv6(encoded_ip_addr): + """ + Convert an IPv6 address in bytes into string-based IPv6 address. + + :param encoded_ip_addr: IPv6 address in bytes + :return: string-based IPv6 address + """ + return str(ipaddress.ip_address(encoded_ip_addr)) + + +# Numerical encoding/decoding + + +def limits(c_int_type): + """ + Discover limits of numerical type. 
+ + :param c_int_type: numerical type + :return: tuple of numerical type's limits + """ + signed = c_int_type(-1).value < c_int_type(0).value + bit_size = sizeof(c_int_type) * 8 + signed_limit = 2 ** (bit_size - 1) + return (-signed_limit, signed_limit - 1) \ + if signed else (0, 2 * signed_limit - 1) + + +def valid_port(port): + """ + Check whether input is a valid port number or not. + + :param port: port number + :return: boolean status + """ + lim = limits(c_uint16) + return lim[0] <= port <= lim[1] + + +def bitwidth_to_bytes(bitwidth): + """ + Convert number of bits to number of bytes. + + :param bitwidth: number of bits + :return: number of bytes + """ + return int(math.ceil(bitwidth / 8.0)) + + +def encode_num(number, bitwidth): + """ + Convert number into bytes. + + :param number: number to convert + :param bitwidth: number of bits + :return: number in bytes + """ + byte_len = bitwidth_to_bytes(bitwidth) + return number.to_bytes(byte_len, byteorder="big") + + +def decode_num(encoded_number): + """ + Convert number in bytes into its numerical form. + + :param encoded_number: number in bytes to convert + :return: numerical number form + """ + return int.from_bytes(encoded_number, "big") + + +# Umbrella encoder + + +def encode(variable, bitwidth): + """ + Tries to infer the type of `input` and encode it. + + :param variable: target variable + :param bitwidth: size of variable in bits + :return: encoded bytes + """ + byte_len = bitwidth_to_bytes(bitwidth) + if isinstance(variable, (list, tuple)) and len(variable) == 1: + variable = variable[0] + + if isinstance(variable, int): + encoded_bytes = encode_num(variable, bitwidth) + elif isinstance(variable, str): + if matches_mac(variable): + encoded_bytes = encode_mac(variable) + elif matches_ipv4(variable): + encoded_bytes = encode_ipv4(variable) + elif matches_ipv6(variable): + encoded_bytes = encode_ipv6(variable) + else: + try: + value = int(variable, 0) + except ValueError as ex: + raise UserBadValueError( + f"Invalid value '{variable}': " + "could not cast to integer, try in hex with 0x prefix")\ + from ex + encoded_bytes = value.to_bytes(byte_len, byteorder="big") + else: + raise Exception( + f"Encoding objects of {type(variable)} is not supported") + assert len(encoded_bytes) == byte_len + return encoded_bytes + + +# Parsers + + +def get_match_field_value(match_field): + """ + Retrieve the value of a certain match field by name. + + :param match_field: match field + :return: match filed value + """ + match_type = match_field.WhichOneof("field_match_type") + if match_type == "valid": + return match_field.valid.value + if match_type == "exact": + return match_field.exact.value + if match_type == "lpm": + return match_field.lpm.value, match_field.lpm.prefix_len + if match_type == "ternary": + return match_field.ternary.value, match_field.ternary.mask + if match_type == "range": + return match_field.range.low, match_field.range.high + raise Exception(f"Unsupported match type with type {match_type}") + + +def parse_resource_string_from_json(resource, resource_str="table-name"): + """ + Parse a given resource name within a JSON-based object. 
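+
+    Example (sketch):
+        parse_resource_string_from_json(
+            {"table-name": "ingress.l2_fwd"})  # -> "ingress.l2_fwd"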
+
+ :param resource: JSON-based object
+ :param resource_str: resource string to parse
+ :return: value of the parsed resource string
+ """
+ if not resource or (resource_str not in resource):
+ LOGGER.warning("JSON entry misses '%s' attribute", resource_str)
+ return None
+ chk_type(resource_str, resource[resource_str], str)
+ return resource[resource_str]
+
+
+def parse_resource_number_from_json(resource, resource_nb):
+ """
+ Parse a given resource number within a JSON-based object.
+
+ :param resource: JSON-based object
+ :param resource_nb: resource number to parse
+ :return: value of the parsed resource number
+ """
+ if not resource or (resource_nb not in resource):
+ LOGGER.warning(
+ "JSON entry misses '%s' attribute", resource_nb)
+ return None
+ chk_type(resource_nb, resource[resource_nb], (int, float))
+ return resource[resource_nb]
+
+
+def parse_resource_integer_from_json(resource, resource_nb):
+ """
+ Parse a given integer number within a JSON-based object.
+
+ :param resource: JSON-based object
+ :param resource_nb: resource number to parse
+ :return: integer value of the parsed resource number, or -1 if absent
+ """
+ num = parse_resource_number_from_json(resource, resource_nb)
+ if num is not None:
+ return int(num)
+ return -1
+
+
+def parse_resource_float_from_json(resource, resource_nb):
+ """
+ Parse a given floating point number within a JSON-based object.
+
+ :param resource: JSON-based object
+ :param resource_nb: resource number to parse
+ :return: float value of the parsed resource number, or -1.0 if absent
+ """
+ num = parse_resource_number_from_json(resource, resource_nb)
+ if num is not None:
+ return float(num)
+ return -1.0
+
+
+def parse_resource_bytes_from_json(resource, resource_bytes):
+ """
+ Parse given resource bytes within a JSON-based object.
+
+ :param resource: JSON-based object
+ :param resource_bytes: resource bytes to parse
+ :return: value of the parsed resource bytes
+ """
+ if not resource or (resource_bytes not in resource):
+ LOGGER.debug(
+ "JSON entry misses '%s' attribute", resource_bytes)
+ return None
+
+ chk_type(resource_bytes, resource[resource_bytes], bytes)
+ return resource[resource_bytes]
+
+
+def parse_match_operations_from_json(resource):
+ """
+ Parse the match operations within a JSON-based object.
+
+ :param resource: JSON-based object
+ :return: map of match operations
+ """
+ if not resource or ("match-fields" not in resource):
+ LOGGER.warning(
+ "JSON entry misses 'match-fields' list of attributes")
+ return {}
+ chk_type("match-fields", resource["match-fields"], list)
+
+ match_map = {}
+ for mf_entry in resource["match-fields"]:
+ if ("match-field" not in mf_entry) or \
+ ("match-value" not in mf_entry):
+ LOGGER.warning(
+ "JSON entry misses 'match-field' and/or "
+ "'match-value' attributes")
+ return None
+ chk_type("match-field", mf_entry["match-field"], str)
+ chk_type("match-value", mf_entry["match-value"], str)
+ match_map[mf_entry["match-field"]] = mf_entry["match-value"]
+
+ return match_map
+
+
+def parse_action_parameters_from_json(resource):
+ """
+ Parse the action parameters within a JSON-based object.
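+
+ Illustrative usage, assuming a hypothetical JSON entry:
+
+ >>> parse_action_parameters_from_json({
+ ...     "action-name": "ingress.set_port",
+ ...     "action-params": [
+ ...         {"action-param": "port", "action-value": "1"}
+ ...     ]})
+ {'port': '1'}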
+ + :param resource: JSON-based object + :return: map of action parameters + """ + if not resource or ("action-params" not in resource): + LOGGER.warning( + "JSON entry misses 'action-params' list of attributes") + return None + chk_type("action-params", resource["action-params"], list) + + action_name = parse_resource_string_from_json(resource, "action-name") + + action_params = {} + for ac_entry in resource["action-params"]: + if not ac_entry: + LOGGER.debug( + "Missing action parameter for action %s", action_name) + continue + chk_type("action-param", ac_entry["action-param"], str) + chk_type("action-value", ac_entry["action-value"], str) + action_params[ac_entry["action-param"]] = \ + ac_entry["action-value"] + + return action_params + + +def parse_integer_list_from_json(resource, resource_list, resource_item): + """ + Parse the list of integers within a JSON-based object. + + :param resource: JSON-based object + :param resource_list: name of the resource list + :param resource_item: name of the resource item + :return: list of integers + """ + if not resource or (resource_list not in resource): + LOGGER.warning( + "JSON entry misses '%s' list of attributes", resource_list) + return [] + chk_type(resource_list, resource[resource_list], list) + + integers_list = [] + for item in resource[resource_list]: + chk_type(resource_item, item[resource_item], int) + integers_list.append(item[resource_item]) + + return integers_list diff --git a/src/device/service/drivers/p4/p4_context.py b/src/device/service/drivers/p4/p4_context.py new file mode 100644 index 0000000000000000000000000000000000000000..ab01c422fe478cfe26c2f7331fc9b4653521db9f --- /dev/null +++ b/src/device/service/drivers/p4/p4_context.py @@ -0,0 +1,284 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Build some context around a given P4 info file. +""" + +from collections import Counter +import enum +from functools import partialmethod + + +@enum.unique +class P4Type(enum.Enum): + """ + P4 types. + """ + table = 1 + action = 2 + action_profile = 3 + counter = 4 + direct_counter = 5 + meter = 6 + direct_meter = 7 + controller_packet_metadata = 8 + + +P4Type.table.p4info_name = "tables" +P4Type.action.p4info_name = "actions" +P4Type.action_profile.p4info_name = "action_profiles" +P4Type.counter.p4info_name = "counters" +P4Type.direct_counter.p4info_name = "direct_counters" +P4Type.meter.p4info_name = "meters" +P4Type.direct_meter.p4info_name = "direct_meters" +P4Type.controller_packet_metadata.p4info_name = "controller_packet_metadata" + +for object_type in P4Type: + object_type.pretty_name = object_type.name.replace('_', ' ') + object_type.pretty_names = object_type.pretty_name + 's' + + +@enum.unique +class P4RuntimeEntity(enum.Enum): + """ + P4 runtime entities. 
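+
+ Each member corresponds to an entity kind of the P4Runtime Entity
+ message. Example (illustrative):
+
+ >>> P4RuntimeEntity.table_entry.name
+ 'table_entry'
+ >>> P4RuntimeEntity(8)
+ <P4RuntimeEntity.packet_replication_engine_entry: 8>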
+ """ + table_entry = 1 + action_profile_member = 2 + action_profile_group = 3 + meter_entry = 4 + direct_meter_entry = 5 + counter_entry = 6 + direct_counter_entry = 7 + packet_replication_engine_entry = 8 + + +class Context: + """ + P4 context. + """ + def __init__(self): + self.p4info = None + self.p4info_obj_map = {} + self.p4info_obj_map_by_id = {} + self.p4info_objs_by_type = {} + + def set_p4info(self, p4info): + """ + Set a p4 info file. + + :param p4info: p4 info file + :return: void + """ + self.p4info = p4info + self._import_p4info_names() + + def get_obj(self, obj_type, name): + """ + Retrieve an object by type and name. + + :param obj_type: P4 object type + :param name: P4 object name + :return: P4 object + """ + key = (obj_type, name) + return self.p4info_obj_map.get(key, None) + + def get_obj_id(self, obj_type, name): + """ + Retrieve a P4 object's ID by type and name. + + :param obj_type: P4 object type + :param name: P4 object name + :return: P4 object ID + """ + obj = self.get_obj(obj_type, name) + if obj is None: + return None + return obj.preamble.id + + def get_param(self, action_name, name): + """ + Get an action parameter by action name. + + :param action_name: P4 action name + :param name: action parameter name + :return: action parameter + """ + action = self.get_obj(P4Type.action, action_name) + if action is None: + return None + for param in action.params: + if param.name == name: + return param + return None + + def get_mf(self, table_name, name): + """ + Get a table's match field by name. + + :param table_name: P4 table name + :param name: match field name + :return: match field + """ + table = self.get_obj(P4Type.table, table_name) + if table is None: + return None + for match_field in table.match_fields: + if match_field.name == name: + return match_field + return None + + def get_param_id(self, action_name, name): + """ + Get an action parameter ID by the action and parameter names. + + :param action_name: P4 action name + :param name: action parameter name + :return: action parameter ID + """ + param = self.get_param(action_name, name) + return None if param is None else param.id + + def get_mf_id(self, table_name, name): + """ + Get a table's match field ID by name. + + :param table_name: P4 table name + :param name: match field name + :return: match field ID + """ + match_field = self.get_mf(table_name, name) + return None if match_field is None else match_field.id + + def get_param_name(self, action_name, id_): + """ + Get an action parameter name by the action name and action ID. + + :param action_name: P4 action name + :param id_: action parameter ID + :return: action parameter name + """ + action = self.get_obj(P4Type.action, action_name) + if action is None: + return None + for param in action.params: + if param.id == id_: + return param.name + return None + + def get_mf_name(self, table_name, id_): + """ + Get a table's match field name by ID. + + :param table_name: P4 table name + :param id_: match field ID + :return: match field name + """ + table = self.get_obj(P4Type.table, table_name) + if table is None: + return None + for match_field in table.match_fields: + if match_field.id == id_: + return match_field.name + return None + + def get_objs(self, obj_type): + """ + Get P4 objects by type. 
+ + :param obj_type: P4 object type + :return: list of tuples (object name, object) + """ + objects = self.p4info_objs_by_type[obj_type] + for name, obj in objects.items(): + yield name, obj + + def get_name_from_id(self, id_): + """ + Get P4 object name by its ID. + + :param id_: P4 object ID + :return: P4 object name + """ + return self.p4info_obj_map_by_id[id_].preamble.name + + def get_obj_by_id(self, id_): + """ + Get P4 object by its ID. + + :param id_: P4 object ID + :return: P4 object + """ + return self.p4info_obj_map_by_id[id_] + + def get_packet_metadata_name_from_id(self, ctrl_pkt_md_name, id_): + """ + Get packet metadata name by ID. + + :param ctrl_pkt_md_name: packet replication entity name + :param id_: packet metadata ID + :return: packet metadata name + """ + ctrl_pkt_md = self.get_obj( + P4Type.controller_packet_metadata, ctrl_pkt_md_name) + if not ctrl_pkt_md: + return None + for meta in ctrl_pkt_md.metadata: + if meta.id == id_: + return meta.name + return None + + # We accept any suffix that uniquely identifies the object + # among p4info objects of the same type. + def _import_p4info_names(self): + """ + Import p4 info into memory. + + :return: void + """ + suffix_count = Counter() + for obj_type in P4Type: + self.p4info_objs_by_type[obj_type] = {} + for obj in getattr(self.p4info, obj_type.p4info_name): + pre = obj.preamble + self.p4info_obj_map_by_id[pre.id] = obj + self.p4info_objs_by_type[obj_type][pre.name] = obj + suffix = None + for suf in reversed(pre.name.split(".")): + suffix = suf if suffix is None else suf + "." + suffix + key = (obj_type, suffix) + self.p4info_obj_map[key] = obj + suffix_count[key] += 1 + for key, cnt in suffix_count.items(): + if cnt > 1: + del self.p4info_obj_map[key] + + +# Add p4info object and object id "getters" for each object type; +# these are just wrappers around Context.get_obj and Context.get_obj_id. +# For example: get_table(x) and get_table_id(x) respectively call +# get_obj(P4Type.table, x) and get_obj_id(P4Type.table, x) +for object_type in P4Type: + object_name = "_".join(["get", object_type.name]) + setattr(Context, object_name, partialmethod( + Context.get_obj, object_type)) + object_name = "_".join(["get", object_type.name, "id"]) + setattr(Context, object_name, partialmethod( + Context.get_obj_id, object_type)) + +for object_type in P4Type: + object_name = "_".join(["get", object_type.p4info_name]) + setattr(Context, object_name, partialmethod(Context.get_objs, object_type)) diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py index af05952b313d1632eacd5962cc34c4aa1b6b5a10..069c07ce40e43192b74519b2175e7e10c638cd20 100644 --- a/src/device/service/drivers/p4/p4_driver.py +++ b/src/device/service/drivers/p4/p4_driver.py @@ -16,13 +16,22 @@ P4 driver plugin for the TeraFlow SDN controller. 
""" +import os +import json import logging import threading from typing import Any, Iterator, List, Optional, Tuple, Union -from .p4_util import P4RuntimeClient,\ +from common.type_checkers.Checkers import chk_type, chk_length, chk_string +from .p4_common import matches_ipv4, matches_ipv6, valid_port,\ P4_ATTR_DEV_ID, P4_ATTR_DEV_NAME, P4_ATTR_DEV_VENDOR,\ - P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER, P4_ATTR_DEV_PIPECONF,\ - P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER, P4_VAL_DEF_PIPECONF + P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER,\ + P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\ + P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER,\ + P4_VAL_DEF_TIMEOUT +from .p4_manager import P4Manager, get_api_version, KEY_TABLE,\ + KEY_ACTION_PROFILE, KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\ + KEY_CTL_PKT_METADATA +from .p4_client import WriteOperation try: from _Driver import _Driver @@ -53,208 +62,543 @@ class P4Driver(_Driver): Hardware version of the P4 device (Optional) sw_ver : str Software version of the P4 device (Optional) - pipeconf : str - P4 device table configuration (Optional) + p4bin : str + Path to P4 binary file (Optional, but must be combined with p4info) + p4info : str + Path to P4 info file (Optional, but must be combined with p4bin) + timeout : int + Device timeout in seconds (Optional) """ def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called - self.__client = None + self.__manager = None self.__address = address self.__port = int(port) + self.__endpoint = None self.__settings = settings - - try: - self.__dev_id = self.__settings.get(P4_ATTR_DEV_ID) - except Exception as ex: - LOGGER.error('P4 device ID is a mandatory setting') - raise Exception from ex - - if P4_ATTR_DEV_NAME in self.__settings: - self.__dev_name = self.__settings.get(P4_ATTR_DEV_NAME) - else: - self.__dev_name = str(self.__dev_id) - LOGGER.warning( - 'No device name is provided. Setting default name: %s', - self.__dev_name) - - if P4_ATTR_DEV_VENDOR in self.__settings: - self.__dev_vendor = self.__settings.get(P4_ATTR_DEV_VENDOR) - else: - self.__dev_vendor = P4_VAL_DEF_VENDOR - LOGGER.warning( - 'No vendor is provided. Setting default vendor: %s', - self.__dev_vendor) - - if P4_ATTR_DEV_HW_VER in self.__settings: - self.__dev_hw_version = self.__settings.get(P4_ATTR_DEV_HW_VER) - else: - self.__dev_hw_version = P4_VAL_DEF_HW_VER - LOGGER.warning( - 'No HW version is provided. Setting default HW version: %s', - self.__dev_hw_version) - - if P4_ATTR_DEV_SW_VER in self.__settings: - self.__dev_sw_version = self.__settings.get(P4_ATTR_DEV_SW_VER) - else: - self.__dev_sw_version = P4_VAL_DEF_SW_VER - LOGGER.warning( - 'No SW version is provided. Setting default SW version: %s', - self.__dev_sw_version) - - if P4_ATTR_DEV_PIPECONF in self.__settings: - self.__dev_pipeconf = self.__settings.get(P4_ATTR_DEV_PIPECONF) - else: - self.__dev_pipeconf = P4_VAL_DEF_PIPECONF - LOGGER.warning( - 'No P4 pipeconf is provided. 
Setting default P4 pipeconf: %s', - self.__dev_pipeconf) - + self.__id = None + self.__name = None + self.__vendor = P4_VAL_DEF_VENDOR + self.__hw_version = P4_VAL_DEF_HW_VER + self.__sw_version = P4_VAL_DEF_SW_VER + self.__p4bin_path = None + self.__p4info_path = None + self.__timeout = P4_VAL_DEF_TIMEOUT self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - LOGGER.info('Initializing P4 device at %s:%d with settings:', + self.__parse_and_validate_settings() + + LOGGER.info("Initializing P4 device at %s:%d with settings:", self.__address, self.__port) for key, value in settings.items(): - LOGGER.info('\t%8s = %s', key, value) + LOGGER.info("\t%8s = %s", key, value) def Connect(self) -> bool: """ - Establishes a connection between the P4 device driver and a P4 device. + Establish a connection between the P4 device driver and a P4 device. :return: boolean connection status. """ - LOGGER.info( - 'Connecting to P4 device %s:%d ...', - self.__address, self.__port) + LOGGER.info("Connecting to P4 device %s ...", self.__endpoint) with self.__lock: # Skip if already connected if self.__started.is_set(): return True - # Instantiate a gRPC channel with the P4 device - grpc_address = f'{self.__address}:{self.__port}' + # Dynamically devise an election ID election_id = (1, 0) - self.__client = P4RuntimeClient( - self.__dev_id, grpc_address, election_id) - LOGGER.info('\tConnected!') + + # Spawn a P4 manager for this device + self.__manager = P4Manager( + device_id=self.__id, + ip_address=self.__address, + port=self.__port, + election_id=election_id) + assert self.__manager + + # Start the P4 manager + try: + self.__manager.start(self.__p4bin_path, self.__p4info_path) + except Exception as ex: # pylint: disable=broad-except + raise Exception(ex) from ex + + LOGGER.info("\tConnected via P4Runtime version %s", + get_api_version()) self.__started.set() return True def Disconnect(self) -> bool: """ - Terminates the connection between the P4 device driver and a P4 device. + Terminate the connection between the P4 device driver and a P4 device. :return: boolean disconnection status. """ - LOGGER.info( - 'Disconnecting from P4 device %s:%d ...', - self.__address, self.__port) + LOGGER.info("Disconnecting from P4 device %s ...", self.__endpoint) # If not started, assume it is already disconnected if not self.__started.is_set(): return True - # gRPC client must already be instantiated - assert self.__client + # P4 manager must already be instantiated + assert self.__manager # Trigger termination of loops and processes self.__terminate.set() # Trigger connection tear down with the P4Runtime server - self.__client.tear_down() - self.__client = None + self.__manager.stop() + self.__manager = None - LOGGER.info('\tDisconnected!') + LOGGER.info("\tDisconnected!") return True def GetInitialConfig(self) -> List[Tuple[str, Any]]: """ - Retrieves the initial configuration of a P4 device. + Retrieve the initial configuration of a P4 device. :return: list of initial configuration items. """ - LOGGER.info('P4 GetInitialConfig()') - return [] + initial_conf = [] - def GetConfig(self, resource_keys : List[str] = [])\ + with self.__lock: + if not initial_conf: + LOGGER.warning("No initial configuration for P4 device %s ...", + self.__endpoint) + return [] + + def GetConfig(self, resource_keys: List[str] = [])\ -> List[Tuple[str, Union[Any, None, Exception]]]: """ - Retrieves the current configuration of a P4 device. + Retrieve the current configuration of a P4 device. 
- :param resource_keys: configuration parameters to retrieve. - :return: list of values associated with the requested resource keys. + :param resource_keys: P4 resource keys to retrieve. + :return: list of values associated with the requested resource keys or + None/Exception. """ + LOGGER.info( + "Getting configuration from P4 device %s ...", self.__endpoint) - LOGGER.info('P4 GetConfig()') - return [] + # No resource keys means fetch all configuration + if len(resource_keys) == 0: + LOGGER.warning( + "GetConfig with no resource keys " + "implies getting all resource keys!") + resource_keys = [ + obj_name for obj_name, _ in self.__manager.p4_objects.items() + ] + + # Verify the input type + chk_type("resources", resource_keys, list) + + with self.__lock: + return self.__get_resources(resource_keys) - def SetConfig(self, resources : List[Tuple[str, Any]])\ + def SetConfig(self, resources: List[Tuple[str, Any]])\ -> List[Union[bool, Exception]]: """ - Submits a new configuration to a P4 device. + Submit a new configuration to a P4 device. - :param resources: configuration parameters to set. - :return: list of results for resource key changes requested. + :param resources: P4 resources to set. + :return: list of boolean results or Exceptions for resource key + changes requested. """ - LOGGER.info('P4 SetConfig()') - return [] + LOGGER.info( + "Setting configuration to P4 device %s ...", self.__endpoint) - def DeleteConfig(self, resources : List[Tuple[str, Any]])\ + if not resources or len(resources) == 0: + LOGGER.warning( + "SetConfig requires a list of resources to store " + "into the device. Nothing is provided though.") + return [] + + assert isinstance(resources, list) + + with self.__lock: + return self.__set_resources(resources) + + def DeleteConfig(self, resources: List[Tuple[str, Any]])\ -> List[Union[bool, Exception]]: """ - Revokes P4 device configuration. + Revoke P4 device configuration. :param resources: list of tuples with resource keys to be deleted. - :return: list of results for resource key deletions requested. + :return: list of boolean results or Exceptions for resource key + deletions requested. """ - LOGGER.info('P4 DeleteConfig()') - return [] + LOGGER.info( + "Deleting configuration from P4 device %s ...", self.__endpoint) + + if not resources or len(resources) == 0: + LOGGER.warning( + "DeleteConfig requires a list of resources to delete " + "from the device. Nothing is provided though.") + return [] - def GetResource(self, endpoint_uuid : str) -> Optional[str]: + with self.__lock: + return self.__delete_resources(resources) + + def GetResource(self, endpoint_uuid: str) -> Optional[str]: """ - Retrieves a certain resource from a P4 device. + Retrieve a certain resource from a P4 device. :param endpoint_uuid: target endpoint UUID. :return: The path of the endpoint or None if not found. """ - LOGGER.info('P4 GetResource()') + LOGGER.warning("GetResource() RPC not yet implemented by the P4 driver") return "" - def GetState(self, blocking=False, terminate : Optional[threading.Event] = None) -> Iterator[Tuple[str, Any]]: + def GetState(self, + blocking=False, + terminate: Optional[threading.Event] = None) -> \ + Iterator[Tuple[str, Any]]: """ - Retrieves the state of a P4 device. + Retrieve the state of a P4 device. :param blocking: if non-blocking, the driver terminates the loop and returns. + :param terminate: termination flag. :return: sequences of state sample. 
""" - LOGGER.info('P4 GetState()') + LOGGER.warning("GetState() RPC not yet implemented by the P4 driver") return [] - def SubscribeState(self, subscriptions : List[Tuple[str, float, float]])\ + def SubscribeState(self, subscriptions: List[Tuple[str, float, float]])\ -> List[Union[bool, Exception]]: """ - Subscribes to certain state information. + Subscribe to certain state information. :param subscriptions: list of tuples with resources to be subscribed. :return: list of results for resource subscriptions requested. """ - LOGGER.info('P4 SubscribeState()') - return [] + LOGGER.warning( + "SubscribeState() RPC not yet implemented by the P4 driver") + return [False for _ in subscriptions] - def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]])\ + def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]])\ -> List[Union[bool, Exception]]: """ - Unsubscribes from certain state information. + Unsubscribe from certain state information. :param subscriptions: list of tuples with resources to be unsubscribed. :return: list of results for resource un-subscriptions requested. """ - LOGGER.info('P4 UnsubscribeState()') - return [] + LOGGER.warning( + "UnsubscribeState() RPC not yet implemented by the P4 driver") + return [False for _ in subscriptions] + + def get_manager(self): + """ + Get an instance of the P4 manager. + + :return: P4 manager instance + """ + return self.__manager + + def __parse_and_validate_settings(self): + """ + Verify that the driver inputs comply to what is expected. + + :return: void or exception in case of validation error + """ + # Device endpoint information + assert matches_ipv4(self.__address) or (matches_ipv6(self.__address)),\ + f"{self.__address} not a valid IPv4 or IPv6 address" + assert valid_port(self.__port), \ + f"{self.__port} not a valid transport port" + self.__endpoint = f"{self.__address}:{self.__port}" + + # Device ID + try: + self.__id = self.__settings.get(P4_ATTR_DEV_ID) + except Exception as ex: + LOGGER.error("P4 device ID is a mandatory setting") + raise Exception from ex + + # Device name + if P4_ATTR_DEV_NAME in self.__settings: + self.__name = self.__settings.get(P4_ATTR_DEV_NAME) + else: + self.__name = str(self.__id) + LOGGER.warning( + "No device name is provided. Setting default name: %s", + self.__name) + + # Device vendor + if P4_ATTR_DEV_VENDOR in self.__settings: + self.__vendor = self.__settings.get(P4_ATTR_DEV_VENDOR) + else: + LOGGER.warning( + "No device vendor is provided. Setting default vendor: %s", + self.__vendor) + + # Device hardware version + if P4_ATTR_DEV_HW_VER in self.__settings: + self.__hw_version = self.__settings.get(P4_ATTR_DEV_HW_VER) + else: + LOGGER.warning( + "No HW version is provided. Setting default HW version: %s", + self.__hw_version) + + # Device software version + if P4_ATTR_DEV_SW_VER in self.__settings: + self.__sw_version = self.__settings.get(P4_ATTR_DEV_SW_VER) + else: + LOGGER.warning( + "No SW version is provided. 
Setting default SW version: %s", + self.__sw_version) + + # Path to P4 binary file + if P4_ATTR_DEV_P4BIN in self.__settings: + self.__p4bin_path = self.__settings.get(P4_ATTR_DEV_P4BIN) + assert os.path.exists(self.__p4bin_path),\ + "Invalid path to p4bin file" + assert P4_ATTR_DEV_P4INFO in self.__settings,\ + "p4info and p4bin settings must be provided together" + + # Path to P4 info file + if P4_ATTR_DEV_P4INFO in self.__settings: + self.__p4info_path = self.__settings.get(P4_ATTR_DEV_P4INFO) + assert os.path.exists(self.__p4info_path),\ + "Invalid path to p4info file" + assert P4_ATTR_DEV_P4BIN in self.__settings,\ + "p4info and p4bin settings must be provided together" + + if (not self.__p4bin_path) or (not self.__p4info_path): + LOGGER.warning( + "No P4 binary and info files are provided, hence " + "no pipeline will be installed on the whitebox device.\n" + "This driver will attempt to manage whatever pipeline " + "is available on the target device.") + + # Device timeout + if P4_ATTR_DEV_TIMEOUT in self.__settings: + self.__timeout = self.__settings.get(P4_ATTR_DEV_TIMEOUT) + assert self.__timeout > 0,\ + "Device timeout must be a positive integer" + else: + LOGGER.warning( + "No device timeout is provided. Setting default timeout: %s", + self.__timeout) + + def __get_resources(self, resource_keys): + """ + Retrieve the current configuration of a P4 device. + + :param resource_keys: P4 resource keys to retrieve. + :return: list of values associated with the requested resource keys or + None/Exception. + """ + resources = [] + + LOGGER.debug("GetConfig() -> Keys: %s", resource_keys) + + for resource_key in resource_keys: + entries = [] + try: + if KEY_TABLE == resource_key: + for table_name in self.__manager.get_table_names(): + t_entries = self.__manager.table_entries_to_json( + table_name) + if t_entries: + entries.append(t_entries) + elif KEY_COUNTER == resource_key: + for cnt_name in self.__manager.get_counter_names(): + c_entries = self.__manager.counter_entries_to_json( + cnt_name) + if c_entries: + entries.append(c_entries) + elif KEY_DIR_COUNTER == resource_key: + for d_cnt_name in self.__manager.get_direct_counter_names(): + dc_entries = \ + self.__manager.direct_counter_entries_to_json( + d_cnt_name) + if dc_entries: + entries.append(dc_entries) + elif KEY_METER == resource_key: + for meter_name in self.__manager.get_meter_names(): + m_entries = self.__manager.meter_entries_to_json( + meter_name) + if m_entries: + entries.append(m_entries) + elif KEY_DIR_METER == resource_key: + for d_meter_name in self.__manager.get_direct_meter_names(): + dm_entries = \ + self.__manager.direct_meter_entries_to_json( + d_meter_name) + if dm_entries: + entries.append(dm_entries) + elif KEY_ACTION_PROFILE == resource_key: + for ap_name in self.__manager.get_action_profile_names(): + ap_entries = \ + self.__manager.action_prof_member_entries_to_json( + ap_name) + if ap_entries: + entries.append(ap_entries) + elif KEY_CTL_PKT_METADATA == resource_key: + msg = f"{resource_key.capitalize()} is not a " \ + f"retrievable resource" + raise Exception(msg) + else: + msg = f"GetConfig failed due to invalid " \ + f"resource key: {resource_key}" + raise Exception(msg) + resources.append( + (resource_key, entries if entries else None) + ) + except Exception as ex: # pylint: disable=broad-except + resources.append((resource_key, ex)) + + return resources + + def __set_resources(self, resources): + """ + Submit a new configuration to a P4 device. + + :param resources: P4 resources to set. 
+ :return: list of boolean results or Exceptions for resource key + changes requested. + """ + results = [] + + for i, resource in enumerate(resources): + str_resource_name = f"resources[#{i}]" + resource_key = "" + try: + chk_type( + str_resource_name, resource, (list, tuple)) + chk_length( + str_resource_name, resource, min_length=2, max_length=2) + resource_key, resource_value = resource + chk_string( + str_resource_name, resource_key, allow_empty=False) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception( + "Exception validating %s: %s", + str_resource_name, str(resource_key)) + results.append(e) # store the exception if validation fails + continue + + try: + resource_value = json.loads(resource_value) + except Exception: # pylint: disable=broad-except + pass + + LOGGER.debug( + "SetConfig() -> Key: %s - Value: %s", + resource_key, resource_value) + + # Default operation is insert. + # P4 manager has internal logic to judge whether an entry + # to be inserted already exists, thus simply needs an update. + operation = WriteOperation.insert + + try: + self.__apply_operation(resource_key, resource_value, operation) + results.append(True) + except Exception as ex: # pylint: disable=broad-except + results.append(ex) + + print(results) + + return results + + def __delete_resources(self, resources): + """ + Revoke P4 device configuration. + + :param resources: list of tuples with resource keys to be deleted. + :return: list of boolean results or Exceptions for resource key + deletions requested. + """ + results = [] + + for i, resource in enumerate(resources): + str_resource_name = f"resources[#{i}]" + resource_key = "" + try: + chk_type( + str_resource_name, resource, (list, tuple)) + chk_length( + str_resource_name, resource, min_length=2, max_length=2) + resource_key, resource_value = resource + chk_string( + str_resource_name, resource_key, allow_empty=False) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception( + "Exception validating %s: %s", + str_resource_name, str(resource_key)) + results.append(e) # store the exception if validation fails + continue + + try: + resource_value = json.loads(resource_value) + except Exception: # pylint: disable=broad-except + pass + + LOGGER.debug("DeleteConfig() -> Key: %s - Value: %s", + resource_key, resource_value) + + operation = WriteOperation.delete + + try: + self.__apply_operation(resource_key, resource_value, operation) + results.append(True) + except Exception as ex: # pylint: disable=broad-except + results.append(ex) + + print(results) + + return results + + def __apply_operation( + self, resource_key, resource_value, operation: WriteOperation): + """ + Apply a write operation to a P4 resource. 
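+
+ Illustrative call (hypothetical table entry, in the JSON layout
+ parsed by p4_common; KEY_TABLE and WriteOperation are imported above):
+
+ entry = {
+     "table-name": "ingress.acl",
+     "match-fields": [
+         {"match-field": "hdr.ethernet.dst_addr",
+          "match-value": "aa:bb:cc:dd:ee:ff"}
+     ],
+     "action-name": "ingress.drop",
+     "action-params": []
+ }
+ self.__apply_operation(KEY_TABLE, entry, WriteOperation.insert)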
+ + :param resource_key: P4 resource key + :param resource_value: P4 resource value in JSON format + :param operation: write operation (i.e., insert, update, delete) + to apply + :return: True if operation is successfully applied or raise Exception + """ + + # Apply settings to the various tables + if KEY_TABLE == resource_key: + self.__manager.table_entry_operation_from_json( + resource_value, operation) + elif KEY_COUNTER == resource_key: + self.__manager.counter_entry_operation_from_json( + resource_value, operation) + elif KEY_DIR_COUNTER == resource_key: + self.__manager.direct_counter_entry_operation_from_json( + resource_value, operation) + elif KEY_METER == resource_key: + self.__manager.meter_entry_operation_from_json( + resource_value, operation) + elif KEY_DIR_METER == resource_key: + self.__manager.direct_meter_entry_operation_from_json( + resource_value, operation) + elif KEY_ACTION_PROFILE == resource_key: + self.__manager.action_prof_member_entry_operation_from_json( + resource_value, operation) + self.__manager.action_prof_group_entry_operation_from_json( + resource_value, operation) + elif KEY_CTL_PKT_METADATA == resource_key: + msg = f"{resource_key.capitalize()} is not a " \ + f"configurable resource" + raise Exception(msg) + else: + msg = f"{operation} on invalid key {resource_key}" + LOGGER.error(msg) + raise Exception(msg) + + LOGGER.debug("%s operation: %s", resource_key.capitalize(), operation) + + return True diff --git a/src/device/service/drivers/p4/p4_exception.py b/src/device/service/drivers/p4/p4_exception.py new file mode 100644 index 0000000000000000000000000000000000000000..3e3afb723b3850fd9a9b2b1c4982bf8ae31b20f7 --- /dev/null +++ b/src/device/service/drivers/p4/p4_exception.py @@ -0,0 +1,135 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4 driver exceptions. +""" + + +class UserError(Exception): + """ + User error exception. + """ + def __init__(self, info=""): + super().__init__() + self.info = info + + def __str__(self): + return self.info + + # TODO: find better way to get a custom traceback # pylint: disable=W0511 + def _render_traceback_(self): + return [str(self)] + + +class InvalidP4InfoError(Exception): + """ + Invalid P4 info exception. + """ + def __init__(self, info=""): + super().__init__() + self.info = info + + def __str__(self): + return f"Invalid P4Info message: {self.info}" + + def _render_traceback_(self): + return [str(self)] + + +class UnknownOptionName(UserError): + """ + Unknown option name exception. + """ + def __init__(self, option_name): + super().__init__() + self.option_name = option_name + + def __str__(self): + return f"Unknown option name: {self.option_name}" + + +class InvalidOptionValueType(UserError): + """ + Invalid option value type exception. + """ + def __init__(self, option, value): + super().__init__() + self.option = option + self.value = value + + def __str__(self): + return f"Invalid value type for option {self.option.name}. 
"\ + "Expected {self.option.value.__name__} but got "\ + "value {self.value} with type {type(self.value).__name__}" + + +class UserBadIPv4Error(UserError): + """ + Invalid IPv4 address value exception. + """ + def __init__(self, addr): + super().__init__() + self.addr = addr + + def __str__(self): + return f"{self.addr}' is not a valid IPv4 address" + + def _render_traceback_(self): + return [str(self)] + + +class UserBadIPv6Error(UserError): + """ + Invalid IPv6 address value exception. + """ + def __init__(self, addr): + super().__init__() + self.addr = addr + + def __str__(self): + return f"'{self.addr}' is not a valid IPv6 address" + + def _render_traceback_(self): + return [str(self)] + + +class UserBadMacError(UserError): + """ + Invalid MAC address value exception. + """ + def __init__(self, addr): + super().__init__() + self.addr = addr + + def __str__(self): + return f"'{self.addr}' is not a valid MAC address" + + def _render_traceback_(self): + return [str(self)] + + +class UserBadValueError(UserError): + """ + Invalid value exception. + """ + def __init__(self, info=""): + super().__init__() + self.info = info + + def __str__(self): + return self.info + + def _render_traceback_(self): + return [str(self)] diff --git a/src/device/service/drivers/p4/p4_global_options.py b/src/device/service/drivers/p4/p4_global_options.py new file mode 100644 index 0000000000000000000000000000000000000000..86043b671e9316dfeff2fb12db8ab3088386382a --- /dev/null +++ b/src/device/service/drivers/p4/p4_global_options.py @@ -0,0 +1,204 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4Runtime global options. +""" + +import enum +try: + from .p4_exception import UnknownOptionName, InvalidOptionValueType +except ImportError: + from p4_exception import UnknownOptionName, InvalidOptionValueType + + +@enum.unique +class Options(enum.Enum): + """ + P4 options. + """ + canonical_bytestrings = bool + + +class GlobalOptions: + """ + P4 global options. + """ + option_defaults = { + Options.canonical_bytestrings: True, + } + + option_helpstrings = { + Options.canonical_bytestrings: """ +Use byte-padded legacy format for binary strings sent to the P4Runtime server, +instead of the canonical representation. See P4Runtime specification for details. +""" + } + + def __init__(self): + self._values = {} + self.reset() + self._option_names = [option.name for option in Options] + self._set_docstring() + + def reset(self): + """ + Reset all options to their defaults. + + :return: void + """ + for option in Options: + assert option in GlobalOptions.option_defaults + self._values[option] = GlobalOptions.option_defaults[option] + + def _supported_options_as_str(self): + """ + Return a comma-separated string of supported options. 
+ + :return: string of supported options + """ + return ", ".join([f"{o.name} ({o.value.__name__})" for o in Options]) + + def _supported_options_as_str_verbose(self): + """ + Return a detailed comma-separated string of supported options. + + :return: string of supported options + """ + opt_str = "" + for option in Options: + opt_str += f"Option name: {option.name}\n" + opt_str += f"Type: {option.value.__name__}\n" + opt_str += f"Default value: " \ + f"{GlobalOptions.option_defaults[option]}\n" + opt_str += f"Description: " \ + f"{GlobalOptions.option_helpstrings.get(option, 'N/A')}\n" + opt_str += "\n" + return opt_str[:-1] + + def _set_docstring(self): + """ + Set the documentation for this object. + + :return: void + """ + self.__doc__ = f""" +Manage global options for the P4Runtime shell. +Supported options are: {self._supported_options_as_str()} +To set the value of a global option, use GLOBAL_OPTIONS["