diff --git a/.gitignore b/.gitignore index 20b98c30c5b3edb0983578b0a5f74fb1c1f3025e..e1f87cfd3842c264bd219237e9afe113d61c35bc 100644 --- a/.gitignore +++ b/.gitignore @@ -176,3 +176,6 @@ libyang/ # Other logs **/logs/*.log.* + +# PySpark checkpoints +src/analytics/.spark/* diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0c5ff9325944d1a5a54d941d32d6a45782257970..115b336761dd94902597c3b6e21e7d3dcf225af1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -48,6 +48,6 @@ include: - local: '/src/kpi_manager/.gitlab-ci.yml' - local: '/src/kpi_value_api/.gitlab-ci.yml' - local: '/src/kpi_value_writer/.gitlab-ci.yml' - + - local: '/src/telemetry/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/deploy/all.sh b/deploy/all.sh index f93cd92ac5e3189b0dc8fa71d74a586e929aaecc..06b8ee701530f56381080879d0e2941b664e5197 100755 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -33,7 +33,7 @@ export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice n #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" # Uncomment to activate Monitoring Framework (new) -#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api" +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics" # Uncomment to activate BGP-LS Speaker #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" @@ -215,6 +215,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} # Deploy QuestDB ./deploy/qdb.sh +# Deploy Apache Kafka +./deploy/kafka.sh + # Expose Dashboard ./deploy/expose_dashboard.sh diff --git a/deploy/kafka.sh b/deploy/kafka.sh index 4a91bfc9e657d1b8a6a548b9c0a81a2f8a0b45e0..0483bce153b457800c6f7db2ef66685e90118111 100755 --- a/deploy/kafka.sh +++ b/deploy/kafka.sh @@ -20,50 +20,71 @@ # If not already set, set the namespace where Apache Kafka will be deployed. export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} +# If not already set, set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"} + +# If not already set, if flag is YES, Apache Kafka will be redeployed and all topics will be lost. 
+export KFK_REDEPLOY=${KFK_REDEPLOY:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## -# Constants -TMP_FOLDER="./tmp" -KFK_MANIFESTS_PATH="manifests/kafka" -KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml" -KFK_MANIFEST="02-kafka.yaml" + # Constants + TMP_FOLDER="./tmp" + KFK_MANIFESTS_PATH="manifests/kafka" + KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml" + KFK_MANIFEST="02-kafka.yaml" + + # Create a tmp folder for files modified during the deployment + TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests" + mkdir -p ${TMP_MANIFESTS_FOLDER} -# Create a tmp folder for files modified during the deployment -TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests" -mkdir -p ${TMP_MANIFESTS_FOLDER} +function kafka_deploy() { + # copy zookeeper and kafka manifest files to temporary manifest location + cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" + cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}" -# copy zookeeper and kafka manifest files to temporary manifest location -cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" -cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}" + # echo "Apache Kafka Namespace" + echo ">>> Delete Apache Kafka Namespace" + kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found -echo "Apache Kafka Namespace" -echo ">>> Delete Apache Kafka Namespace" -kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found + echo ">>> Create Apache Kafka Namespace" + kubectl create namespace ${KFK_NAMESPACE} -echo ">>> Create Apache Kafka Namespace" -kubectl create namespace ${KFK_NAMESPACE} + # echo ">>> Deplying Apache Kafka Zookeeper" + # Kafka zookeeper service should be deployed before the kafka service + kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" -echo ">>> Deplying Apache Kafka Zookeeper" -# Kafka zookeeper service should be deployed before the kafka service -kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" + KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically + KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') -KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically -KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') + # Kafka service should be deployed after the zookeeper service + sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" -# Kafka service should be deployed after the zookeeper service -sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + # echo ">>> Deploying Apache Kafka Broker" + kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" -echo ">>> Deploying Apache Kafka Broker" -kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + # echo ">>> Verifing Apache Kafka deployment" + sleep 5 + # 
KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods) + # if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then + # echo "Deployment Error: \n $KFK_PODS_STATUS" + # else + # echo "$KFK_PODS_STATUS" + # fi +} -echo ">>> Verifing Apache Kafka deployment" -sleep 10 -KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods) -if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then - echo "Deployment Error: \n $KFK_PODS_STATUS" +echo "Apache Kafka" +echo ">>> Checking if Apache Kafka is deployed ... " +if [ "$KFK_REDEPLOY" == "YES" ]; then + echo ">>> Redeploying Apache Kafka namespace" + kafka_deploy +elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then + echo ">>> Apache Kafka already present; skipping step." else - echo "$KFK_PODS_STATUS" -fi \ No newline at end of file + echo ">>> Kafka namespace doesn't exist. Deploying Apache Kafka namespace" + kafka_deploy +fi +echo diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 62f36a2c138c99b1ee666c8c5397083266ad699d..189ae11e16e77196d6728482b7f16443149b60a9 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -115,6 +115,17 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"} export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} +# ----- Apache Kafka ------------------------------------------------------ + +# If not already set, set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} + +# If not already set, set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"} + +# If not already set, if flag is YES, Apache Kafka will be redeployed and all topics will be lost. +export KFK_REDEPLOY=${KFK_REDEPLOY:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -147,7 +158,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type= --from-literal=CRDB_SSLMODE=require printf "\n" -echo "Create secret with CockroachDB data for KPI Management" +echo "Create secret with CockroachDB data for KPI Management microservices" CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt" # TODO: change by specific configurable environment variable kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ @@ -159,6 +170,37 @@ kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --t --from-literal=CRDB_SSLMODE=require printf "\n" +echo "Create secret with CockroachDB data for Telemetry microservices" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +CRDB_DATABASE_TELEMETRY="tfs_telemetry" # TODO: change by specific configurable environment variable +kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with CockroachDB data for Analytics microservices"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +CRDB_DATABASE_ANALYTICS="tfs_analytics" # TODO: change by specific configurable environment variable +kubectl create secret generic crdb-analytics --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE_ANALYTICS} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with Apache Kafka data for KPI, Telemetry and Analytics microservices" +KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}') +kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \ + --from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT} +printf "\n" + echo "Create secret with NATS data" NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') if [ -z "$NATS_CLIENT_PORT" ]; then @@ -187,6 +229,21 @@ kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type=' --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD} printf "\n" +# Check if "dlt" is in the list of components +if [[ " ${TFS_COMPONENTS[@]} " =~ " dlt " ]]; then + echo "Create secret for HLF keystore" + kubectl create secret generic dlt-keystone --namespace ${TFS_K8S_NAMESPACE} --from-file=keystore=${KEY_DIRECTORY_PATH} + printf "\n" + + echo "Create secret for HLF signcerts" + kubectl create secret generic dlt-signcerts --namespace ${TFS_K8S_NAMESPACE} --from-file=signcerts.pem=${CERT_DIRECTORY_PATH} + printf "\n" + + echo "Create secret for HLF ca.crt" + kubectl create secret generic dlt-ca-crt --namespace ${TFS_K8S_NAMESPACE} --from-file=ca.crt=${TLS_CERT_PATH} + printf "\n" +fi + echo "Deploying components and collecting environment variables..." ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT @@ -234,15 +291,17 @@ for COMPONENT in $TFS_COMPONENTS; do if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" - elif [ "$COMPONENT" == "pathcomp" ]; then + elif [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" - # next command is redundant, but helpful to keep cache updated between rebuilds - IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" - $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + if [ "$COMPONENT" == "pathcomp" ]; then + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . 
>> "$BUILD_LOG" + fi elif [ "$COMPONENT" == "dlt" ]; then BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" $DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" @@ -255,7 +314,7 @@ for COMPONENT in $TFS_COMPONENTS; do echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." - if [ "$COMPONENT" == "pathcomp" ]; then + if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ] ; then IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" @@ -306,7 +365,7 @@ for COMPONENT in $TFS_COMPONENTS; do cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" fi - if [ "$COMPONENT" == "pathcomp" ]; then + if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" diff --git a/hackfest/containerlab/README.md b/hackfest/containerlab/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e08e944e991d8cb8599e89fab63f4b8b664ed73c --- /dev/null +++ b/hackfest/containerlab/README.md @@ -0,0 +1,35 @@ +# ContainerLab + +The setup consists of a management network for configuring and managing nodes. +srl1 and srl2 are interconnected. +client1 is connected to srl1 and client2 to srl2. +Routing between client1 and client2 is set up via the Nokia SR Linux nodes. + +## Management Network +Name: mgmt-net +Subnet: 172.100.100.0/24 + +## Node Kinds +Nokia SR Linux: Image ghcr.io/nokia/srlinux:23.10.3 +Linux: Image ghcr.io/hellt/network-multitool + +## Nodes + +### Nokia SR Linux +- Type: ixr6 +- CPU: 0.5 +- Memory: 2GB +- Management IP: 172.100.100.101 + +The provided SR Linux CLI commands in the _srl.cli_ enables system management and configures the GNMI server with OpenConfig models. + +### Linux + +Assigns IP 172.16.1.10/24 to eth1 and adds route to 172.16.2.0/24 via 172.16.1.1 + +In this topology file, the clients are pre-configured with the respectivly IP addresses in their interfaces and routes in their IP tables. + +### Links +- Connect srl1:e1-1 to srl2:e1-1 +- Connect client1:eth1 to srl1:e1-2 +- Connect client2:eth1 to srl2:e1-2 \ No newline at end of file diff --git a/hackfest/containerlab/commands.txt b/hackfest/containerlab/commands.txt index df5fbc0ce0163f4ce06b862e90e29854dbae204a..ac91d4b08b913209151e4024eb04b31384ed641a 100644 --- a/hackfest/containerlab/commands.txt +++ b/hackfest/containerlab/commands.txt @@ -83,19 +83,19 @@ $ssh admin@clab-tfs-scenario-srl1 # Check configurations done: -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl1-nis.json -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl1-ifs.json -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl2-nis.json -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl2-ifs.json +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! 
--skip-verify -e json_ietf get --path '/network-instances' > srl1-nis.json +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl1-ifs.json +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl2-nis.json +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl2-ifs.json # Delete elements: -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! 
--skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' # Run gNMI Driver in standalone mode (advanced) PYTHONPATH=./src python -m src.device.tests.test_gnmi diff --git a/hackfest/containerlab/srl.cli b/hackfest/containerlab/srl.cli new file mode 100644 index 0000000000000000000000000000000000000000..7d4987e22795ddd4667e030abddbc11827b9dc66 --- /dev/null +++ b/hackfest/containerlab/srl.cli @@ -0,0 +1,2 @@ +set / system management openconfig admin-state enable +set / system gnmi-server network-instance mgmt yang-models openconfig diff --git a/hackfest/containerlab/tfs-scenario.clab.yml b/hackfest/containerlab/tfs-scenario.clab.yml index f79378757827ff706be849b03277b947ee85f7fb..c715a1a539b59d807cd13c03f75f7de2e7bae084 100644 --- a/hackfest/containerlab/tfs-scenario.clab.yml +++ b/hackfest/containerlab/tfs-scenario.clab.yml @@ -23,35 +23,41 @@ mgmt: topology: kinds: - srl: - image: ghcr.io/nokia/srlinux:23.3.1 + nokia_srlinux: + image: ghcr.io/nokia/srlinux:23.10.3 linux: image: ghcr.io/hellt/network-multitool nodes: srl1: - kind: srl + kind: nokia_srlinux type: ixr6 cpu: 0.5 - memory: 1GB + memory: 2GB mgmt-ipv4: 172.100.100.101 - #startup-config: srl1.cli + startup-config: srl.cli srl2: - kind: srl + kind: nokia_srlinux type: ixr6 cpu: 0.5 - memory: 1GB + memory: 2GB mgmt-ipv4: 172.100.100.102 - #startup-config: srl2.cli + startup-config: srl.cli client1: kind: linux cpu: 0.1 memory: 100MB mgmt-ipv4: 172.100.100.201 + exec: + - ip address add 172.16.1.10/24 dev eth1 + - ip route add 172.16.2.0/24 via 172.16.1.1 client2: kind: linux cpu: 0.1 memory: 100MB mgmt-ipv4: 172.100.100.202 + exec: + - ip address add 172.16.2.10/24 dev eth1 + - ip route add 172.16.1.0/24 via 172.16.2.1 links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] diff --git a/manifests/analyticsservice.yaml b/manifests/analyticsservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0fa3ed0be6eda8cf944e199543e3c2cd59cc98d6 --- /dev/null +++ b/manifests/analyticsservice.yaml @@ -0,0 +1,128 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: analyticsservice +spec: + selector: + matchLabels: + app: analyticsservice + #replicas: 1 + template: + metadata: + labels: + app: analyticsservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: frontend + image: labs.etsi.org:5050/tfs/controller/analytics-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 30080 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: crdb-analytics + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30080"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30080"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: backend + image: labs.etsi.org:5050/tfs/controller/analytics-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 30090 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30090"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30090"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: analyticsservice + labels: + app: analyticsservice +spec: + type: ClusterIP + selector: + app: analyticsservice + ports: + - name: frontend-grpc + protocol: TCP + port: 30080 + targetPort: 30080 + - name: backend-grpc + protocol: TCP + port: 30090 + targetPort: 30090 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: analyticsservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: analyticsservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index 34f0d53c3116183696b6f7f49682596092f313ae..66bd3724c7a6244ea553d7199968c900bce0611c 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -12,6 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: dlt-config +data: + CHANNEL_NAME: "tfs_channel" # Change according to your blockchain configuration + CHAINCODE_NAME: "tfs_dlt" # Change according to your blockchain configuration + MSP_ID: "ETSI" # Change according to your blockchain configuration + PEER_ENDPOINT: "127.0.0.1:7051" # Change according to your blockchain configuration + PEER_HOST_ALIAS: "peer0.org1.tfs.etsi.org" # Change according to your blockchain configuration + KEY_DIRECTORY_PATH: "/etc/hyperledger/fabric-keystore/keystore" + CERT_DIRECTORY_PATH: "/etc/hyperledger/fabric-signcerts/signcerts.pem" + TLS_CERT_PATH: "/etc/hyperledger/fabric-ca-crt/ca.crt" + +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -78,6 +93,52 @@ spec: limits: cpu: 700m memory: 1024Mi + volumeMounts: + - name: keystore + mountPath: /etc/hyperledger/fabric-keystore + readOnly: true + - name: signcerts + mountPath: /etc/hyperledger/fabric-signcerts + readOnly: true + - name: ca-crt + mountPath: /etc/hyperledger/fabric-ca-crt + readOnly: true + envFrom: + - configMapRef: + name: dlt-config + env: + - name: KEY_DIRECTORY_PATH + value: "/etc/hyperledger/fabric-keystore/keystore" + - name: CERT_DIRECTORY_PATH + value: "/etc/hyperledger/fabric-signcerts/signcerts.pem" + - name: TLS_CERT_PATH + value: "/etc/hyperledger/fabric-ca-crt/ca.crt" + volumes: + - name: keystore + secret: + secretName: dlt-keystone + - name: signcerts + secret: + secretName: dlt-signcerts + - name: ca-crt + secret: + secretName: dlt-ca-crt + +--- +apiVersion: v1 +kind: Service +metadata: + name: gatewayservice +spec: + selector: + app: dltservice + ports: + - protocol: TCP + port: 50051 + targetPort: 50051 + nodePort: 32001 + type: NodePort + --- apiVersion: v1 kind: Service diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index 9be6032cfbb59cb580219ca71451be24dac93205..8926dcdafdea90ad7dea41eca854cbcb30853553 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -38,6 +38,8 @@ spec: value: "INFO" - name: TOPOLOGY_ABSTRACTOR value: "DISABLE" + - name: DLT_INTEGRATION + value: "DISABLE" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10010"] diff --git a/manifests/kafka/02-kafka.yaml b/manifests/kafka/02-kafka.yaml index 8e4562e6eabec34bf3b87912310479bd98022aeb..8400f5944193458ccdad8be5dbc189f8f40cdd7b 100644 --- a/manifests/kafka/02-kafka.yaml +++ b/manifests/kafka/02-kafka.yaml @@ -53,9 +53,9 @@ spec: - name: KAFKA_LISTENERS value: PLAINTEXT://:9092 - name: KAFKA_ADVERTISED_LISTENERS - value: PLAINTEXT://localhost:9092 + value: PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092 image: wurstmeister/kafka imagePullPolicy: IfNotPresent name: kafka-broker ports: - - containerPort: 9092 \ No newline at end of file + - containerPort: 9092 diff --git a/manifests/kpi_value_apiservice.yaml b/manifests/kpi_value_apiservice.yaml index 74eb90f675794f1b451b04af55e191edff58fae5..e4dcb00545ffaa33de39fd29c029780b777ea91f 100644 --- a/manifests/kpi_value_apiservice.yaml +++ b/manifests/kpi_value_apiservice.yaml @@ -39,6 +39,9 @@ spec: env: - name: LOG_LEVEL value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:30020"] diff --git a/manifests/kpi_value_writerservice.yaml b/manifests/kpi_value_writerservice.yaml index 8a8e44ec2a571f1290e30a08d1c896a6339cbe46..e21e36f48ba08999f142e8548fed61cd2dfef0cc 100644 --- a/manifests/kpi_value_writerservice.yaml +++ 
b/manifests/kpi_value_writerservice.yaml @@ -39,6 +39,9 @@ spec: env: - name: LOG_LEVEL value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:30030"] diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f9917499a425b95d436ffa8cdb311d29483d2ca --- /dev/null +++ b/manifests/telemetryservice.yaml @@ -0,0 +1,128 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: telemetryservice +spec: + selector: + matchLabels: + app: telemetryservice + #replicas: 1 + template: + metadata: + labels: + app: telemetryservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: frontend + image: labs.etsi.org:5050/tfs/controller/telemetry-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 30050 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: crdb-telemetry + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30050"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30050"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: backend + image: labs.etsi.org:5050/tfs/controller/telemetry-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 30060 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30060"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30060"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: telemetryservice + labels: + app: telemetryservice +spec: + type: ClusterIP + selector: + app: telemetryservice + ports: + - name: frontend-grpc + protocol: TCP + port: 30050 + targetPort: 30050 + - name: backend-grpc + protocol: TCP + port: 30060 + targetPort: 30060 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: telemetryservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: telemetryservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/my_deploy.sh b/my_deploy.sh index b89df7481ebd17edf2b966eb818598d1a04a596f..88be82b63e9e79a97ee79702de886f69a6152f94 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -62,6 +62,18 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_gene # Uncomment to activate E2E Orchestrator 
#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +#export TFS_COMPONENTS="${TFS_COMPONENTS} app" + + # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" @@ -181,3 +193,8 @@ export GRAF_EXT_PORT_HTTP="3000" # Set the namespace where Apache Kafka will be deployed. export KFK_NAMESPACE="kafka" +# Set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT="9092" + +# Set the flag to YES to redeploy Apache Kafka (all topics will be lost) +export KFK_REDEPLOY="" diff --git a/proto/analytics_frontend.proto b/proto/analytics_frontend.proto index 096c1ee035ae663359d9f4df1e071d3997a0d351..ace0581db816bee1d0d20746f2b864dce602567b 100644 --- a/proto/analytics_frontend.proto +++ b/proto/analytics_frontend.proto @@ -30,21 +30,25 @@ message AnalyzerId { } enum AnalyzerOperationMode { - ANALYZEROPERATIONMODE_BATCH = 0; - ANALYZEROPERATIONMODE_STREAMING = 1; + ANALYZEROPERATIONMODE_UNSPECIFIED = 0; + ANALYZEROPERATIONMODE_BATCH = 1; + ANALYZEROPERATIONMODE_STREAMING = 2; } +// duration field may be added in analyzer... message Analyzer { - string algorithm_name = 1; // The algorithm to be executed - repeated kpi_manager.KpiId input_kpi_ids = 2; // The KPI Ids to be processed by the analyzer - repeated kpi_manager.KpiId output_kpi_ids = 3; // The KPI Ids produced by the analyzer - AnalyzerOperationMode operation_mode = 4; // Operation mode of the analyzer - - // In batch mode... - float batch_min_duration_s = 5; // ..., min duration to collect before executing batch - float batch_max_duration_s = 6; // ..., max duration collected to execute the batch - uint64 batch_min_size = 7; // ..., min number of samples to collect before executing batch - uint64 batch_max_size = 8; // ..., max number of samples collected to execute the batch + AnalyzerId analyzer_id = 1; + string algorithm_name = 2; // The algorithm to be executed + float duration_s = 3; // Terminate the data analytics thread after duration (seconds); 0 = run indefinitely + repeated kpi_manager.KpiId input_kpi_ids = 4; // The KPI Ids to be processed by the analyzer + repeated kpi_manager.KpiId output_kpi_ids = 5; // The KPI Ids produced by the analyzer + AnalyzerOperationMode operation_mode = 6; // Operation mode of the analyzer + map<string, string> parameters = 7; // Add dictionary of (key, value) pairs such as (window_size, 10) etc. + // In batch mode...
+ float batch_min_duration_s = 8; // ..., min duration to collect before executing batch + float batch_max_duration_s = 9; // ..., max duration collected to execute the batch + uint64 batch_min_size = 10; // ..., min number of samples to collect before executing batch + uint64 batch_max_size = 11; // ..., max number of samples collected to execute the batch } message AnalyzerFilter { diff --git a/proto/context.proto b/proto/context.proto index 87f69132df022e2aa4a0766dc9f0a7a7fae36d59..fa9b1959b54746a6b9a426215874840e1cda8d10 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -214,6 +214,7 @@ enum DeviceDriverEnum { DEVICEDRIVER_OPTICAL_TFS = 9; DEVICEDRIVER_IETF_ACTN = 10; DEVICEDRIVER_OC = 11; + DEVICEDRIVER_QKD = 12; } enum DeviceOperationalStatusEnum { @@ -300,6 +301,7 @@ enum ServiceTypeEnum { SERVICETYPE_TE = 4; SERVICETYPE_E2E = 5; SERVICETYPE_OPTICAL_CONNECTIVITY = 6; + SERVICETYPE_QKD = 7; } enum ServiceStatusEnum { diff --git a/proto/telemetry_frontend.proto b/proto/telemetry_frontend.proto index dbc1e8bf688f9f2df341484c1929e2338c458bbf..614d10cf06cdbb1ff4fba6e51a39286eb5132688 100644 --- a/proto/telemetry_frontend.proto +++ b/proto/telemetry_frontend.proto @@ -19,9 +19,9 @@ import "context.proto"; import "kpi_manager.proto"; service TelemetryFrontendService { - rpc StartCollector (Collector ) returns (CollectorId ) {} - rpc StopCollector (CollectorId ) returns (context.Empty) {} - rpc SelectCollectors(CollectorFilter) returns (CollectorList) {} + rpc StartCollector (Collector ) returns (CollectorId ) {} + rpc StopCollector (CollectorId ) returns (context.Empty) {} + rpc SelectCollectors (CollectorFilter) returns (CollectorList) {} } message CollectorId { @@ -29,10 +29,12 @@ message CollectorId { } message Collector { - CollectorId collector_id = 1; // The Collector ID - kpi_manager.KpiId kpi_id = 2; // The KPI Id to be associated to the collected samples - float duration_s = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely - float interval_s = 4; // Interval between collected samples + CollectorId collector_id = 1; // The Collector ID + kpi_manager.KpiId kpi_id = 2; // The KPI Id to be associated to the collected samples + float duration_s = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely + float interval_s = 4; // Interval between collected samples + context.Timestamp start_time = 5; // Timestamp when Collector start execution + context.Timestamp end_time = 6; // Timestamp when Collector stop execution } message CollectorFilter { diff --git a/scripts/run_tests_locally-telemetry-mgtDB.sh b/scripts/run_tests_locally-analytics-DB.sh similarity index 69% rename from scripts/run_tests_locally-telemetry-mgtDB.sh rename to scripts/run_tests_locally-analytics-DB.sh index 8b68104eaf343b57ec4953334cda37167cca3529..9df5068d6bde361a4a1e73b96990c0d407c88cb4 100755 --- a/scripts/run_tests_locally-telemetry-mgtDB.sh +++ b/scripts/run_tests_locally-analytics-DB.sh @@ -17,10 +17,8 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src -# RCFILE=$PROJECTDIR/coverage/.coveragerc -# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ -# kpi_manager/tests/test_unitary.py - RCFILE=$PROJECTDIR/coverage/.coveragerc -python3 -m pytest --log-cli-level=INFO --verbose \ - telemetry/database/tests/managementDBtests.py +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') +export 
CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + analytics/tests/test_analytics_db.py diff --git a/scripts/run_tests_locally-analytics-frontend.sh b/scripts/run_tests_locally-analytics-frontend.sh new file mode 100755 index 0000000000000000000000000000000000000000..e30d30da623b2d0eee3d925d69a846b4b1f516a3 --- /dev/null +++ b/scripts/run_tests_locally-analytics-frontend.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + analytics/frontend/tests/test_frontend.py diff --git a/scripts/run_tests_locally-kpi-manager.sh b/scripts/run_tests_locally-kpi-manager.sh index a6a24f90db93d56300ac997bd00675c479ef13ae..8a4ce8d95c74657451147078a1d93e891dfc2ac8 100755 --- a/scripts/run_tests_locally-kpi-manager.sh +++ b/scripts/run_tests_locally-kpi-manager.sh @@ -24,7 +24,7 @@ cd $PROJECTDIR/src # python3 kpi_manager/tests/test_unitary.py RCFILE=$PROJECTDIR/coverage/.coveragerc -CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}') +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ kpi_manager/tests/test_kpi_manager.py diff --git a/scripts/run_tests_locally-kpi-value-API.sh b/scripts/run_tests_locally-kpi-value-API.sh index 8dfbfb16237634519dcae2fcc34f850a5188c1e7..3953d2a89c6fbe2bd3546e648246b9b018e5fdb0 100755 --- a/scripts/run_tests_locally-kpi-value-API.sh +++ b/scripts/run_tests_locally-kpi-value-API.sh @@ -19,7 +19,8 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc - +KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") +KFK_SERVER_ADDRESS=${KAFKA_IP}:9092 # helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG -o log_cli=true --verbose \ kpi_value_api/tests/test_kpi_value_api.py diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh index bb1c48b76440c00b398875a8f704c2a82ba4ab50..4b9a417603cc42a4e7e8b19c7394cc38633817fa 100755 --- a/scripts/run_tests_locally-telemetry-DB.sh +++ b/scripts/run_tests_locally-telemetry-DB.sh @@ -22,5 +22,5 @@ cd $PROJECTDIR/src # kpi_manager/tests/test_unitary.py 
RCFILE=$PROJECTDIR/coverage/.coveragerc -python3 -m pytest --log-cli-level=INFO --verbose \ - telemetry/database/tests/telemetryDBtests.py +python3 -m pytest --log-level=DEBUG --log-cli-level=debug --verbose \ + telemetry/tests/test_telemetryDB.py diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh index 9cf404ffcef6c99b261f81eb0c6b910dd60845e5..79db05fcf1259365e8a909ee99395eb59dfb9437 100755 --- a/scripts/run_tests_locally-telemetry-backend.sh +++ b/scripts/run_tests_locally-telemetry-backend.sh @@ -24,5 +24,5 @@ cd $PROJECTDIR/src # python3 kpi_manager/tests/test_unitary.py RCFILE=$PROJECTDIR/coverage/.coveragerc -python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \ - telemetry/backend/tests/testTelemetryBackend.py +python3 -m pytest --log-level=INFO --log-cli-level=debug --verbose \ + telemetry/backend/tests/test_TelemetryBackend.py diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh index 7652ccb583268285dcd2fcf3090b717dc18e4fc3..a2a1de52340cac527d4d1c446c76740d38ce7783 100755 --- a/scripts/run_tests_locally-telemetry-frontend.sh +++ b/scripts/run_tests_locally-telemetry-frontend.sh @@ -24,5 +24,5 @@ cd $PROJECTDIR/src # python3 kpi_manager/tests/test_unitary.py RCFILE=$PROJECTDIR/coverage/.coveragerc -python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \ +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ telemetry/frontend/tests/test_frontend.py diff --git a/scripts/show_logs_analytics_backend.sh b/scripts/show_logs_analytics_backend.sh new file mode 100755 index 0000000000000000000000000000000000000000..afb58567ca5ab250da48d2cfffa2c56abdff2db2 --- /dev/null +++ b/scripts/show_logs_analytics_backend.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. 
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/analyticsservice -c backend diff --git a/scripts/show_logs_analytics_frontend.sh b/scripts/show_logs_analytics_frontend.sh new file mode 100755 index 0000000000000000000000000000000000000000..6d3fae10b366f0082d3a393c224e8f1cb7830721 --- /dev/null +++ b/scripts/show_logs_analytics_frontend.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/analyticsservice -c frontend diff --git a/src/analytics/README.md b/src/analytics/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9663e5321ace6866491b90553553d9ccbf5793a1 --- /dev/null +++ b/src/analytics/README.md @@ -0,0 +1,4 @@ +# How to locally run and test the Analytics service (To be added soon) + +### Pre-requisites +The following requirements should be fulfilled before the execution of the Analytics service. diff --git a/src/telemetry/database/tests/__init__.py b/src/analytics/__init__.py similarity index 94% rename from src/telemetry/database/tests/__init__.py rename to src/analytics/__init__.py index 839e45e3b646bc60de7edd81fcfb91b7b38feadf..bbfc943b68af13a11e562abbc8680ade71db8f02 100644 --- a/src/telemetry/database/tests/__init__.py +++ b/src/analytics/__init__.py @@ -10,4 +10,4 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file +# limitations under the License.
diff --git a/src/analytics/backend/Dockerfile b/src/analytics/backend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..17adcd3ab1df5704cc7ef0c5a19b3cfb1539ee22 --- /dev/null +++ b/src/analytics/backend/Dockerfile @@ -0,0 +1,69 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/analytics/backend +WORKDIR /var/teraflow/analytics/backend +COPY src/analytics/backend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/analytics/__init__.py analytics/__init__.py +COPY src/analytics/backend/. analytics/backend/ + +# Start the service +ENTRYPOINT ["python", "-m", "analytics.backend.service"] diff --git a/src/analytics/backend/__init__.py b/src/analytics/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/analytics/backend/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/telemetry/database/tests/managementDBtests.py b/src/analytics/backend/requirements.in similarity index 70% rename from src/telemetry/database/tests/managementDBtests.py rename to src/analytics/backend/requirements.in index 24138abe42be742bd9b16d7840343f9d7c7fe133..9df678fe819f33d479b8f5090ca9ac4eb1f4047c 100644 --- a/src/telemetry/database/tests/managementDBtests.py +++ b/src/analytics/backend/requirements.in @@ -12,11 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. - -from telemetry.database.managementDB import managementDB -from telemetry.database.tests.messages import create_collector_model_object - - -def test_add_row_to_db(): - managementDBobj = managementDB() - managementDBobj.add_row_to_db(create_collector_model_object()) \ No newline at end of file +pyspark==3.5.2 +confluent-kafka==2.3.* diff --git a/src/analytics/backend/service/AnalyticsBackendService.py b/src/analytics/backend/service/AnalyticsBackendService.py new file mode 100755 index 0000000000000000000000000000000000000000..595603567fe537d9f7b33224cba0fe016a439631 --- /dev/null +++ b/src/analytics/backend/service/AnalyticsBackendService.py @@ -0,0 +1,132 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import logging +import threading +from common.tools.service.GenericGrpcService import GenericGrpcService +from analytics.backend.service.SparkStreaming import SparkStreamer +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +from confluent_kafka import Consumer as KafkaConsumer +from confluent_kafka import KafkaError +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc + + +LOGGER = logging.getLogger(__name__) + +class AnalyticsBackendService(GenericGrpcService): + """ + Class listens for ... + """ + def __init__(self, cls_name : str = __name__) -> None: + LOGGER.info('Init AnalyticsBackendService') + port = get_service_port_grpc(ServiceNameEnum.ANALYTICSBACKEND) + super().__init__(port, cls_name=cls_name) + self.running_threads = {} # To keep track of all running analyzers + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'analytics-frontend', + 'auto.offset.reset' : 'latest'}) + + def StartSparkStreamer(self, analyzer_uuid, analyzer): + kpi_list = analyzer['input_kpis'] + oper_list = [s.replace('_value', '') for s in list(analyzer["thresholds"].keys())] # TODO: update this line... 
+ thresholds = analyzer['thresholds'] + window_size = analyzer['window_size'] + window_slider = analyzer['window_slider'] + print ("Received parameters: {:} - {:} - {:} - {:} - {:}".format( + kpi_list, oper_list, thresholds, window_size, window_slider)) + LOGGER.debug ("Received parameters: {:} - {:} - {:} - {:} - {:}".format( + kpi_list, oper_list, thresholds, window_size, window_slider)) + try: + stop_event = threading.Event() + thread = threading.Thread(target=SparkStreamer, + args=(analyzer_uuid, kpi_list, oper_list, thresholds, stop_event, + window_size, window_slider, None )) + self.running_threads[analyzer_uuid] = (thread, stop_event) + thread.start() + print ("Initiated Analyzer backend: {:}".format(analyzer_uuid)) + LOGGER.info("Initiated Analyzer backend: {:}".format(analyzer_uuid)) + return True + except Exception as e: + print ("Failed to initiate Analyzer backend: {:}".format(e)) + LOGGER.error("Failed to initiate Analyzer backend: {:}".format(e)) + return False + + def StopRequestListener(self, threadInfo: tuple): + try: + thread, stop_event = threadInfo + stop_event.set() + thread.join() + print ("Terminating Analytics backend RequestListener") + LOGGER.info("Terminating Analytics backend RequestListener") + return True + except Exception as e: + print ("Failed to terminate analytics backend {:}".format(e)) + LOGGER.error("Failed to terminate analytics backend {:}".format(e)) + return False + + def install_services(self): + stop_event = threading.Event() + thread = threading.Thread(target=self.RequestListener, + args=(stop_event,) ) + thread.start() + return (thread, stop_event) + + def RequestListener(self, stop_event): + """ + listener for requests on Kafka topic. + """ + consumer = self.kafka_consumer + consumer.subscribe([KafkaTopic.ANALYTICS_REQUEST.value]) + while not stop_event.is_set(): + receive_msg = consumer.poll(2.0) + if receive_msg is None: + continue + elif receive_msg.error(): + if receive_msg.error().code() == KafkaError._PARTITION_EOF: + continue + else: + print("Consumer error: {}".format(receive_msg.error())) + break + analyzer = json.loads(receive_msg.value().decode('utf-8')) + analyzer_uuid = receive_msg.key().decode('utf-8') + LOGGER.debug('Recevied Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer)) + print ('Recevied Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer)) + + if analyzer["algo_name"] is None and analyzer["oper_mode"] is None: + self.TerminateAnalyzerBackend(analyzer_uuid) + else: + self.StartSparkStreamer(analyzer_uuid, analyzer) + LOGGER.debug("Stop Event activated. Terminating...") + print ("Stop Event activated. Terminating...") + + def TerminateAnalyzerBackend(self, analyzer_uuid): + if analyzer_uuid in self.running_threads: + try: + thread, stop_event = self.running_threads[analyzer_uuid] + stop_event.set() + thread.join() + del self.running_threads[analyzer_uuid] + print ("Terminating backend (by TerminateBackend): Analyzer Id: {:}".format(analyzer_uuid)) + LOGGER.info("Terminating backend (by TerminateBackend): Analyzer Id: {:}".format(analyzer_uuid)) + return True + except Exception as e: + LOGGER.error("Failed to terminate. Analyzer Id: {:} - ERROR: {:}".format(analyzer_uuid, e)) + return False + else: + print ("Analyzer not found in active collectors. 
Analyzer Id: {:}".format(analyzer_uuid)) + LOGGER.warning("Analyzer not found in active collectors: Analyzer Id: {:}".format(analyzer_uuid)) + # generate confirmation towards frontend diff --git a/src/analytics/backend/service/SparkStreaming.py b/src/analytics/backend/service/SparkStreaming.py new file mode 100644 index 0000000000000000000000000000000000000000..96e1aa05d898ffdd23c533b74ee87fbf03f54576 --- /dev/null +++ b/src/analytics/backend/service/SparkStreaming.py @@ -0,0 +1,154 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, time +from pyspark.sql import SparkSession +from pyspark.sql.types import StructType, StructField, StringType, DoubleType, TimestampType +from pyspark.sql.functions import from_json, col, window, avg, min, max, first, last, stddev, when, round +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic + +LOGGER = logging.getLogger(__name__) + +def DefiningSparkSession(): + # Create a Spark session with specific spark verions (3.5.0) + return SparkSession.builder \ + .appName("Analytics") \ + .config("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true") \ + .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.5.0") \ + .getOrCreate() + +def SettingKafkaConsumerParams(): # TODO: create get_kafka_consumer() in common with inputs (bootstrap server, subscribe, startingOffset and failOnDataLoss with default values) + return { + # "kafka.bootstrap.servers": '127.0.0.1:9092', + "kafka.bootstrap.servers": KafkaConfig.get_kafka_address(), + "subscribe" : KafkaTopic.VALUE.value, + "startingOffsets" : 'latest', + "failOnDataLoss" : 'false' # Optional: Set to "true" to fail the query on data loss + } + +def DefiningRequestSchema(): + return StructType([ + StructField("time_stamp" , StringType() , True), + StructField("kpi_id" , StringType() , True), + StructField("kpi_value" , DoubleType() , True) + ]) + +def GetAggregations(oper_list): + # Define the possible aggregation functions + agg_functions = { + 'avg' : round(avg ("kpi_value"), 3) .alias("avg_value"), + 'min' : round(min ("kpi_value"), 3) .alias("min_value"), + 'max' : round(max ("kpi_value"), 3) .alias("max_value"), + 'first': round(first ("kpi_value"), 3) .alias("first_value"), + 'last' : round(last ("kpi_value"), 3) .alias("last_value"), + 'stdev': round(stddev ("kpi_value"), 3) .alias("stdev_value") + } + return [agg_functions[op] for op in oper_list if op in agg_functions] # Filter and return only the selected aggregations + +def ApplyThresholds(aggregated_df, thresholds): + # Apply thresholds (TH-Fail and TH-RAISE) based on the thresholds dictionary on the aggregated DataFrame. 
+ + # Loop through each column name and its associated thresholds + for col_name, (fail_th, raise_th) in thresholds.items(): + # Apply TH-Fail condition (if column value is less than the fail threshold) + aggregated_df = aggregated_df.withColumn( + f"{col_name}_THRESHOLD_FAIL", + when(col(col_name) < fail_th, True).otherwise(False) + ) + # Apply TH-RAISE condition (if column value is greater than the raise threshold) + aggregated_df = aggregated_df.withColumn( + f"{col_name}_THRESHOLD_RAISE", + when(col(col_name) > raise_th, True).otherwise(False) + ) + return aggregated_df + +def SparkStreamer(key, kpi_list, oper_list, thresholds, stop_event, + window_size=None, win_slide_duration=None, time_stamp_col=None): + """ + Method to perform Spark operation Kafka stream. + NOTE: Kafka topic to be processesd should have atleast one row before initiating the spark session. + """ + kafka_consumer_params = SettingKafkaConsumerParams() # Define the Kafka consumer parameters + schema = DefiningRequestSchema() # Define the schema for the incoming JSON data + spark = DefiningSparkSession() # Define the spark session with app name and spark version + + # extra options default assignment + if window_size is None: window_size = "60 seconds" # default + if win_slide_duration is None: win_slide_duration = "30 seconds" # default + if time_stamp_col is None: time_stamp_col = "time_stamp" # default + + try: + # Read data from Kafka + raw_stream_data = spark \ + .readStream \ + .format("kafka") \ + .options(**kafka_consumer_params) \ + .load() + + # Convert the value column from Kafka to a string + stream_data = raw_stream_data.selectExpr("CAST(value AS STRING)") + # Parse the JSON string into a DataFrame with the defined schema + parsed_stream_data = stream_data.withColumn("parsed_value", from_json(col("value"), schema)) + # Select the parsed fields + final_stream_data = parsed_stream_data.select("parsed_value.*") + # Convert the time_stamp to proper timestamp (assuming it's in ISO format) + final_stream_data = final_stream_data.withColumn(time_stamp_col, col(time_stamp_col).cast(TimestampType())) + # Filter the stream to only include rows where the kpi_id is in the kpi_list + filtered_stream_data = final_stream_data.filter(col("kpi_id").isin(kpi_list)) + # Define a window for aggregation + windowed_stream_data = filtered_stream_data \ + .groupBy( + window( col(time_stamp_col), + window_size, slideDuration=win_slide_duration + ), + col("kpi_id") + ) \ + .agg(*GetAggregations(oper_list)) + # Apply thresholds to the aggregated data + thresholded_stream_data = ApplyThresholds(windowed_stream_data, thresholds) + + # --- This will write output on console: FOR TESTING PURPOSES + # Start the Spark streaming query + # query = thresholded_stream_data \ + # .writeStream \ + # .outputMode("update") \ + # .format("console") + + # --- This will write output to Kafka: ACTUAL IMPLEMENTATION + query = thresholded_stream_data \ + .selectExpr(f"'{key}' AS key", "to_json(struct(*)) AS value") \ + .writeStream \ + .format("kafka") \ + .option("kafka.bootstrap.servers", KafkaConfig.get_kafka_address()) \ + .option("topic", KafkaTopic.ANALYTICS_RESPONSE.value) \ + .option("checkpointLocation", "analytics/.spark/checkpoint") \ + .outputMode("update") + + # Start the query execution + queryHandler = query.start() + + # Loop to check for stop event flag. To be set by stop collector method. + while True: + if stop_event.is_set(): + LOGGER.debug("Stop Event activated. Terminating in 5 seconds...") + print ("Stop Event activated. 
Terminating in 5 seconds...") + time.sleep(5) + queryHandler.stop() + break + time.sleep(5) + + except Exception as e: + print("Error in Spark streaming process: {:}".format(e)) + LOGGER.debug("Error in Spark streaming process: {:}".format(e)) diff --git a/src/analytics/backend/service/__init__.py b/src/analytics/backend/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/analytics/backend/service/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/analytics/backend/service/__main__.py b/src/analytics/backend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4c36b7c7bd952164bf9e48a45e22fb00575564 --- /dev/null +++ b/src/analytics/backend/service/__main__.py @@ -0,0 +1,56 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
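The GetAggregations and ApplyThresholds helpers in SparkStreaming.py above can be exercised on a static DataFrame without Kafka. The following is a minimal sketch, assuming only a local pyspark installation with a working JVM; the sample rows, session name and threshold values are illustrative and not part of the patch:

    from pyspark.sql import SparkSession
    from pyspark.sql.functions import avg, max, round, when, col

    spark = SparkSession.builder.master("local[*]").appName("thresholds-sketch").getOrCreate()
    df = spark.createDataFrame(
        [("kpi-1", 22.5), ("kpi-1", 27.0), ("kpi-2", 48.0)],
        ["kpi_id", "kpi_value"])

    # Mirrors GetAggregations(['avg', 'max']): rounded aggregates aliased as <op>_value
    aggregated = df.groupBy("kpi_id").agg(
        round(avg("kpi_value"), 3).alias("avg_value"),
        round(max("kpi_value"), 3).alias("max_value"))

    # Mirrors ApplyThresholds: each column maps to a (fail_th, raise_th) tuple
    thresholds = {"avg_value": (20, 30), "max_value": (45, 50)}
    for col_name, (fail_th, raise_th) in thresholds.items():
        aggregated = aggregated.withColumn(
            f"{col_name}_THRESHOLD_FAIL",  when(col(col_name) < fail_th,  True).otherwise(False))
        aggregated = aggregated.withColumn(
            f"{col_name}_THRESHOLD_RAISE", when(col(col_name) > raise_th, True).otherwise(False))
    aggregated.show()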
+ +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port +from .AnalyticsBackendService import AnalyticsBackendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + grpc_service = AnalyticsBackendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/analytics/backend/tests/__init__.py b/src/analytics/backend/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/analytics/backend/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/analytics/backend/tests/messages.py b/src/analytics/backend/tests/messages.py new file mode 100644 index 0000000000000000000000000000000000000000..9acd6ad9dffe4a5b10b107a6923ed85170ee141f --- /dev/null +++ b/src/analytics/backend/tests/messages.py @@ -0,0 +1,34 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +def get_kpi_id_list(): + return ["6e22f180-ba28-4641-b190-2287bf448888", "1e22f180-ba28-4641-b190-2287bf446666"] + +def get_operation_list(): + return [ 'avg', 'max' ] # possibilities ['avg', 'min', 'max', 'first', 'last', 'stdev'] + +def get_threshold_dict(): + threshold_dict = { + 'avg_value' : (20, 30), + 'min_value' : (00, 10), + 'max_value' : (45, 50), + 'first_value' : (00, 10), + 'last_value' : (40, 50), + 'stdev_value' : (00, 10), + } + # Filter threshold_dict based on the operation_list + return { + op + '_value': threshold_dict[op+'_value'] for op in get_operation_list() if op + '_value' in threshold_dict + } diff --git a/src/analytics/backend/tests/test_backend.py b/src/analytics/backend/tests/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..2f40faba94ef7081db609116e8fd869e3d119a24 --- /dev/null +++ b/src/analytics/backend/tests/test_backend.py @@ -0,0 +1,64 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import logging +import threading +from common.tools.kafka.Variables import KafkaTopic +from analytics.backend.service.AnalyticsBackendService import AnalyticsBackendService +from analytics.backend.tests.messages import get_kpi_id_list, get_operation_list, get_threshold_dict + +LOGGER = logging.getLogger(__name__) + + +########################### +# Tests Implementation of Telemetry Backend +########################### + +# --- "test_validate_kafka_topics" should be run before the functionality tests --- +def test_validate_kafka_topics(): + LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") + response = KafkaTopic.create_all_topics() + assert isinstance(response, bool) + +# def test_StartRequestListener(): +# LOGGER.info('test_RunRequestListener') +# AnalyticsBackendServiceObj = AnalyticsBackendService() +# response = AnalyticsBackendServiceObj.StartRequestListener() # response is Tuple (thread, stop_event) +# LOGGER.debug(str(response)) +# assert isinstance(response, tuple) + +# To test START and STOP communication together +def test_StopRequestListener(): + LOGGER.info('test_RunRequestListener') + LOGGER.info('Initiating StartRequestListener...') + AnalyticsBackendServiceObj = AnalyticsBackendService() + response_thread = AnalyticsBackendServiceObj.StartRequestListener() # response is Tuple (thread, stop_event) + # LOGGER.debug(str(response_thread)) + time.sleep(10) + LOGGER.info('Initiating StopRequestListener...') + AnalyticsBackendServiceObj = AnalyticsBackendService() + response = AnalyticsBackendServiceObj.StopRequestListener(response_thread) + LOGGER.debug(str(response)) + assert isinstance(response, bool) + +# To independently tests the SparkListener functionality +# def test_SparkListener(): +# LOGGER.info('test_RunRequestListener') +# AnalyticsBackendServiceObj = AnalyticsBackendService() +# response = AnalyticsBackendServiceObj.RunSparkStreamer( +# get_kpi_id_list(), get_operation_list(), get_threshold_dict() +# ) +# 
LOGGER.debug(str(response)) +# assert isinstance(response, bool) diff --git a/src/analytics/database/AnalyzerEngine.py b/src/analytics/database/AnalyzerEngine.py new file mode 100644 index 0000000000000000000000000000000000000000..9294e09966ef9e13c9cfa3cab590e5d0c8b6a80e --- /dev/null +++ b/src/analytics/database/AnalyzerEngine.py @@ -0,0 +1,40 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, sqlalchemy +from common.Settings import get_setting + +LOGGER = logging.getLogger(__name__) +CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' + +class AnalyzerEngine: + @staticmethod + def get_engine() -> sqlalchemy.engine.Engine: + crdb_uri = get_setting('CRDB_URI', default=None) + if crdb_uri is None: + CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE') + CRDB_SQL_PORT = get_setting('CRDB_SQL_PORT') + CRDB_DATABASE = "tfs-analyzer" # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT') + CRDB_USERNAME = get_setting('CRDB_USERNAME') + CRDB_PASSWORD = get_setting('CRDB_PASSWORD') + CRDB_SSLMODE = get_setting('CRDB_SSLMODE') + crdb_uri = CRDB_URI_TEMPLATE.format( + CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) + try: + engine = sqlalchemy.create_engine(crdb_uri, echo=False) + LOGGER.info(' AnalyzerDB initalized with DB URL: {:}'.format(crdb_uri)) + except: # pylint: disable=bare-except # pragma: no cover + LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) + return None # type: ignore + return engine diff --git a/src/analytics/database/AnalyzerModel.py b/src/analytics/database/AnalyzerModel.py new file mode 100644 index 0000000000000000000000000000000000000000..c33e396e06a8dce96a86951a64aa59b510931dfe --- /dev/null +++ b/src/analytics/database/AnalyzerModel.py @@ -0,0 +1,106 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
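When CRDB_URI is not set, AnalyzerEngine.get_engine() above assembles the connection string from the individual CRDB_* settings using CRDB_URI_TEMPLATE. A hypothetical example, with placeholder credentials and namespace that are not shipped with this patch:

    # CRDB_USERNAME=tfs, CRDB_PASSWORD=tfs123, CRDB_NAMESPACE=crdb,
    # CRDB_SQL_PORT=26257, CRDB_SSLMODE=require would yield:
    crdb_uri = 'cockroachdb://tfs:tfs123@cockroachdb-public.crdb.svc.cluster.local:26257/tfs-analyzer?sslmode=require'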
+ +import logging +import enum + +from sqlalchemy import Column, String, Float, Enum, BigInteger, JSON +from sqlalchemy.orm import registry +from common.proto import analytics_frontend_pb2 +from common.proto import kpi_manager_pb2 + +from sqlalchemy.dialects.postgresql import UUID, ARRAY + + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +# Create a base class for declarative models +Base = registry().generate_base() + +class AnalyzerOperationMode (enum.Enum): + BATCH = analytics_frontend_pb2.AnalyzerOperationMode.ANALYZEROPERATIONMODE_BATCH + STREAMING = analytics_frontend_pb2.AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING + +class Analyzer(Base): + __tablename__ = 'analyzer' + + analyzer_id = Column( UUID(as_uuid=False) , primary_key=True) + algorithm_name = Column( String , nullable=False ) + input_kpi_ids = Column( ARRAY(UUID(as_uuid=False)) , nullable=False ) + output_kpi_ids = Column( ARRAY(UUID(as_uuid=False)) , nullable=False ) + operation_mode = Column( Enum(AnalyzerOperationMode), nullable=False ) + parameters = Column( JSON , nullable=True ) + batch_min_duration_s = Column( Float , nullable=False ) + batch_max_duration_s = Column( Float , nullable=False ) + batch_min_size = Column( BigInteger , nullable=False ) + batch_max_size = Column( BigInteger , nullable=False ) + + # helps in logging the information + def __repr__(self): + return (f"<Analyzer(analyzer_id='{self.analyzer_id}' , algorithm_name='{self.algorithm_name}', " + f"input_kpi_ids={self.input_kpi_ids} , output_kpi_ids={self.output_kpi_ids}, " + f"operation_mode='{self.operation_mode}' , parameters={self.parameters}, " + f"batch_min_duration_s={self.batch_min_duration_s} , batch_max_duration_s={self.batch_max_duration_s}, " + f"batch_min_size={self.batch_min_size} , batch_max_size={self.batch_max_size})>") + + + @classmethod + def ConvertAnalyzerToRow(cls, request): + """ + Create an instance of Analyzer table rows from a request object. + Args: request: The request object containing analyzer gRPC message. + Returns: A row (an instance of Analyzer table) initialized with content of the request. + """ + return cls( + analyzer_id = request.analyzer_id.analyzer_id.uuid, + algorithm_name = request.algorithm_name, + input_kpi_ids = [k.kpi_id.uuid for k in request.input_kpi_ids], + output_kpi_ids = [k.kpi_id.uuid for k in request.output_kpi_ids], + operation_mode = AnalyzerOperationMode(request.operation_mode), # converts integer to coresponding Enum class member + parameters = dict(request.parameters), + batch_min_duration_s = request.batch_min_duration_s, + batch_max_duration_s = request.batch_max_duration_s, + batch_min_size = request.batch_min_size, + batch_max_size = request.batch_max_size + ) + + @classmethod + def ConvertRowToAnalyzer(cls, row): + """ + Create and return an Analyzer gRPC message initialized with the content of a row. + Args: row: The Analyzer table instance (row) containing the data. + Returns: An Analyzer gRPC message initialized with the content of the row. 
+ """ + # Create an instance of the Analyzer message + response = analytics_frontend_pb2.Analyzer() + response.analyzer_id.analyzer_id.uuid = row.analyzer_id + response.algorithm_name = row.algorithm_name + response.operation_mode = row.operation_mode.value + response.parameters.update(row.parameters) + + for input_kpi_id in row.input_kpi_ids: + _kpi_id = kpi_manager_pb2.KpiId() + _kpi_id.kpi_id.uuid = input_kpi_id + response.input_kpi_ids.append(_kpi_id) + for output_kpi_id in row.output_kpi_ids: + _kpi_id = kpi_manager_pb2.KpiId() + _kpi_id.kpi_id.uuid = output_kpi_id + response.output_kpi_ids.append(_kpi_id) + + response.batch_min_duration_s = row.batch_min_duration_s + response.batch_max_duration_s = row.batch_max_duration_s + response.batch_min_size = row.batch_min_size + response.batch_max_size = row.batch_max_size + return response diff --git a/src/analytics/database/Analyzer_DB.py b/src/analytics/database/Analyzer_DB.py new file mode 100644 index 0000000000000000000000000000000000000000..1ba68989a066e4638adc12e65289ed50b740731d --- /dev/null +++ b/src/analytics/database/Analyzer_DB.py @@ -0,0 +1,150 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import sqlalchemy_utils + +from sqlalchemy import inspect, or_ +from sqlalchemy.orm import sessionmaker + +from analytics.database.AnalyzerModel import Analyzer as AnalyzerModel +from analytics.database.AnalyzerEngine import AnalyzerEngine +from common.method_wrappers.ServiceExceptions import (OperationFailedException, AlreadyExistsException) + +LOGGER = logging.getLogger(__name__) +DB_NAME = "tfs_analyzer" # TODO: export name from enviornment variable + +class AnalyzerDB: + def __init__(self): + self.db_engine = AnalyzerEngine.get_engine() + if self.db_engine is None: + LOGGER.error('Unable to get SQLAlchemy DB Engine...') + return False + self.db_name = DB_NAME + self.Session = sessionmaker(bind=self.db_engine) + + def create_database(self): + if not sqlalchemy_utils.database_exists(self.db_engine.url): + LOGGER.debug("Database created. {:}".format(self.db_engine.url)) + sqlalchemy_utils.create_database(self.db_engine.url) + + def drop_database(self) -> None: + if sqlalchemy_utils.database_exists(self.db_engine.url): + sqlalchemy_utils.drop_database(self.db_engine.url) + + def create_tables(self): + try: + AnalyzerModel.metadata.create_all(self.db_engine) # type: ignore + LOGGER.debug("Tables created in the database: {:}".format(self.db_name)) + except Exception as e: + LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e))) + raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)]) + + def verify_tables(self): + try: + inspect_object = inspect(self.db_engine) + if(inspect_object.has_table('analyzer', None)): + LOGGER.info("Table exists in DB: {:}".format(self.db_name)) + except Exception as e: + LOGGER.info("Unable to fetch Table names. 
{:s}".format(str(e))) + +# ----------------- CURD OPERATIONS --------------------- + + def add_row_to_db(self, row): + session = self.Session() + try: + session.add(row) + session.commit() + LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.") + return True + except Exception as e: + session.rollback() + if "psycopg2.errors.UniqueViolation" in str(e): + LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}") + raise AlreadyExistsException(row.__class__.__name__, row, + extra_details=["Unique key voilation: {:}".format(e)] ) + else: + LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def search_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + entity = session.query(model).filter_by(**{col_name: id_to_search}).first() + if entity: + # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}") + return entity + else: + LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}") + print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search)) + return None + except Exception as e: + session.rollback() + LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}") + raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)]) + finally: + session.close() + + def delete_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + record = session.query(model).filter_by(**{col_name: id_to_search}).first() + if record: + session.delete(record) + session.commit() + LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search) + else: + LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search) + return None + except Exception as e: + session.rollback() + LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e) + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def select_with_filter(self, model, filter_object): + session = self.Session() + try: + query = session.query(AnalyzerModel) + + # Apply filters based on the filter_object + if filter_object.analyzer_id: + query = query.filter(AnalyzerModel.analyzer_id.in_([a.analyzer_id.uuid for a in filter_object.analyzer_id])) + + if filter_object.algorithm_names: + query = query.filter(AnalyzerModel.algorithm_name.in_(filter_object.algorithm_names)) + + if filter_object.input_kpi_ids: + input_kpi_uuids = [k.kpi_id.uuid for k in filter_object.input_kpi_ids] + query = query.filter(AnalyzerModel.input_kpi_ids.op('&&')(input_kpi_uuids)) + + if filter_object.output_kpi_ids: + output_kpi_uuids = [k.kpi_id.uuid for k in filter_object.output_kpi_ids] + query = query.filter(AnalyzerModel.output_kpi_ids.op('&&')(output_kpi_uuids)) + + result = query.all() + # query should be added to return all rows + if result: + LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") # - Results: {result} + else: + LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}") + return result + except Exception as e: + LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}") + raise OperationFailedException 
("Select by filter", extra_details=["unable to apply the filter {:}".format(e)]) + finally: + session.close() diff --git a/src/telemetry/frontend/tests/__init__.py b/src/analytics/database/__init__.py similarity index 100% rename from src/telemetry/frontend/tests/__init__.py rename to src/analytics/database/__init__.py diff --git a/src/analytics/frontend/Dockerfile b/src/analytics/frontend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..10499713f318a23e1aeab49c96e8163a5ec147fa --- /dev/null +++ b/src/analytics/frontend/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/analytics/frontend +WORKDIR /var/teraflow/analytics/frontend +COPY src/analytics/frontend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/analytics/__init__.py analytics/__init__.py +COPY src/analytics/frontend/. analytics/frontend/ +COPY src/analytics/database/. 
analytics/database/ + +# Start the service +ENTRYPOINT ["python", "-m", "analytics.frontend.service"] diff --git a/src/analytics/frontend/__init__.py b/src/analytics/frontend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/analytics/frontend/client/AnalyticsFrontendClient.py b/src/analytics/frontend/client/AnalyticsFrontendClient.py new file mode 100644 index 0000000000000000000000000000000000000000..90e95d661d46f24ae5ffaeb7bcfa19b7e1f36526 --- /dev/null +++ b/src/analytics/frontend/client/AnalyticsFrontendClient.py @@ -0,0 +1,68 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
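The frontend Dockerfile above copies common_requirements.in, the proto definitions and the src/ tree relative to the repository root, so the image is expected to be built with the repository root as build context. A hypothetical invocation (the image tag is illustrative):

    docker build -t tfs/analytics-frontend:dev -f src/analytics/frontend/Dockerfile .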
+ +import grpc, logging +from common.Constants import ServiceNameEnum +from common.proto.context_pb2 import Empty +from common.proto.analytics_frontend_pb2_grpc import AnalyticsFrontendServiceStub +from common.proto.analytics_frontend_pb2 import AnalyzerId, Analyzer, AnalyzerFilter, AnalyzerList +from common.Settings import get_service_host, get_service_port_grpc +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.client.RetryDecorator import retry, delay_exponential + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 10 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class AnalyticsFrontendClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.ANALYTICSFRONTEND) + if not port: port = get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = AnalyticsFrontendServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def StartAnalyzer (self, request: Analyzer) -> AnalyzerId: #type: ignore + LOGGER.debug('StartAnalyzer: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.StartAnalyzer(request) + LOGGER.debug('StartAnalyzer result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def StopAnalyzer(self, request : AnalyzerId) -> Empty: # type: ignore + LOGGER.debug('StopAnalyzer: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.StopAnalyzer(request) + LOGGER.debug('StopAnalyzer result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SelectAnalyzers(self, request : AnalyzerFilter) -> AnalyzerList: # type: ignore + LOGGER.debug('SelectAnalyzers: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SelectAnalyzers(request) + LOGGER.debug('SelectAnalyzers result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/analytics/frontend/client/__init__.py b/src/analytics/frontend/client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
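AnalyticsFrontendClient above exposes StartAnalyzer, StopAnalyzer and SelectAnalyzers behind a retry decorator. A minimal caller sketch follows; the endpoint, UUIDs and algorithm name are placeholders, and in a deployment the host and port would normally be resolved through the Settings helpers instead:

    import json
    from common.proto.analytics_frontend_pb2 import Analyzer, AnalyzerOperationMode
    from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient

    client = AnalyticsFrontendClient(host='127.0.0.1', port=30050)                     # placeholder endpoint

    analyzer = Analyzer()
    analyzer.analyzer_id.analyzer_id.uuid = 'efef4d95-1cf1-43c4-9742-95c283ddd7a6'     # placeholder UUID
    analyzer.algorithm_name = 'some_algorithm_name'                                    # placeholder name
    analyzer.operation_mode = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING
    analyzer.input_kpi_ids.add().kpi_id.uuid = '6e22f180-ba28-4641-b190-2287bf448888'  # placeholder KPI
    analyzer.parameters['thresholds']    = json.dumps({'avg_value': (20, 30)})
    analyzer.parameters['window_size']   = '60 seconds'
    analyzer.parameters['window_slider'] = '30 seconds'

    analyzer_id = client.StartAnalyzer(analyzer)   # returns an AnalyzerId
    client.StopAnalyzer(analyzer_id)               # publishes the stop request and deletes the DB row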
+ diff --git a/src/analytics/frontend/requirements.in b/src/analytics/frontend/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..d81b9ddbeafeff94c830d48ca5594e775b9ce240 --- /dev/null +++ b/src/analytics/frontend/requirements.in @@ -0,0 +1,20 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apscheduler==3.10.4 +confluent-kafka==2.3.* +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* diff --git a/src/analytics/frontend/service/AnalyticsFrontendService.py b/src/analytics/frontend/service/AnalyticsFrontendService.py new file mode 100644 index 0000000000000000000000000000000000000000..42a7fc9b60418c1c0fc5af6f320ae5c330ce8871 --- /dev/null +++ b/src/analytics/frontend/service/AnalyticsFrontendService.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.service.GenericGrpcService import GenericGrpcService +from common.proto.analytics_frontend_pb2_grpc import add_AnalyticsFrontendServiceServicer_to_server +from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl + +class AnalyticsFrontendService(GenericGrpcService): + def __init__(self, cls_name: str = __name__): + port = get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND) + super().__init__(port, cls_name=cls_name) + self.analytics_frontend_servicer = AnalyticsFrontendServiceServicerImpl() + + def install_servicers(self): + add_AnalyticsFrontendServiceServicer_to_server(self.analytics_frontend_servicer, self.server) diff --git a/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..8bb6a17afb5b911e3652fdb8d1853b5b7bc6faf3 --- /dev/null +++ b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py @@ -0,0 +1,214 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, grpc, json, queue + +from typing import Dict +from confluent_kafka import Consumer as KafkaConsumer +from confluent_kafka import Producer as KafkaProducer +from confluent_kafka import KafkaError + +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +from common.proto.context_pb2 import Empty +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.proto.analytics_frontend_pb2 import Analyzer, AnalyzerId, AnalyzerFilter, AnalyzerList +from common.proto.analytics_frontend_pb2_grpc import AnalyticsFrontendServiceServicer +from analytics.database.Analyzer_DB import AnalyzerDB +from analytics.database.AnalyzerModel import Analyzer as AnalyzerModel +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.interval import IntervalTrigger + +LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('AnalyticsFrontend', 'NBIgRPC') + +class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer): + def __init__(self): + LOGGER.info('Init AnalyticsFrontendService') + self.listener_topic = KafkaTopic.ANALYTICS_RESPONSE.value + self.db_obj = AnalyzerDB() + self.result_queue = queue.Queue() + self.scheduler = BackgroundScheduler() + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'analytics-frontend', + 'auto.offset.reset' : 'latest'}) + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def StartAnalyzer(self, + request : Analyzer, grpc_context: grpc.ServicerContext # type: ignore + ) -> AnalyzerId: # type: ignore + LOGGER.info ("At Service gRPC message: {:}".format(request)) + response = AnalyzerId() + + self.db_obj.add_row_to_db( + AnalyzerModel.ConvertAnalyzerToRow(request) + ) + self.PublishStartRequestOnKafka(request) + + response.analyzer_id.uuid = request.analyzer_id.analyzer_id.uuid + return response + + def PublishStartRequestOnKafka(self, analyzer_obj): + """ + Method to generate analyzer request on Kafka. 
+ """ + analyzer_uuid = analyzer_obj.analyzer_id.analyzer_id.uuid + analyzer_to_generate : Dict = { + "algo_name" : analyzer_obj.algorithm_name, + "input_kpis" : [k.kpi_id.uuid for k in analyzer_obj.input_kpi_ids], + "output_kpis" : [k.kpi_id.uuid for k in analyzer_obj.output_kpi_ids], + "oper_mode" : analyzer_obj.operation_mode, + "thresholds" : json.loads(analyzer_obj.parameters["thresholds"]), + "window_size" : analyzer_obj.parameters["window_size"], + "window_slider" : analyzer_obj.parameters["window_slider"], + # "store_aggregate" : analyzer_obj.parameters["store_aggregate"] + } + self.kafka_producer.produce( + KafkaTopic.ANALYTICS_REQUEST.value, + key = analyzer_uuid, + value = json.dumps(analyzer_to_generate), + callback = self.delivery_callback + ) + LOGGER.info("Analyzer Start Request Generated: Analyzer Id: {:}, Value: {:}".format(analyzer_uuid, analyzer_to_generate)) + self.kafka_producer.flush() + + # self.StartResponseListener(analyzer_uuid) + + def StartResponseListener(self, filter_key=None): + """ + Start the Kafka response listener with APScheduler and return key-value pairs periodically. + """ + LOGGER.info("Starting StartResponseListener") + # Schedule the ResponseListener at fixed intervals + self.scheduler.add_job( + self.response_listener, + trigger=IntervalTrigger(seconds=5), + args=[filter_key], + id=f"response_listener_{self.listener_topic}", + replace_existing=True + ) + self.scheduler.start() + LOGGER.info(f"Started Kafka listener for topic {self.listener_topic}...") + try: + while True: + LOGGER.info("entering while...") + key, value = self.result_queue.get() # Wait until a result is available + LOGGER.info("In while true ...") + yield key, value # Yield the result to the calling function + except KeyboardInterrupt: + LOGGER.warning("Listener stopped manually.") + finally: + self.StopListener() + + def response_listener(self, filter_key=None): + """ + Poll Kafka messages and put key-value pairs into the queue. + """ + LOGGER.info(f"Polling Kafka topic {self.listener_topic}...") + + consumer = self.kafka_consumer + consumer.subscribe([self.listener_topic]) + msg = consumer.poll(2.0) + if msg is None: + return + elif msg.error(): + if msg.error().code() != KafkaError._PARTITION_EOF: + LOGGER.error(f"Kafka error: {msg.error()}") + return + + try: + key = msg.key().decode('utf-8') if msg.key() else None + if filter_key is not None and key == filter_key: + value = json.loads(msg.value().decode('utf-8')) + LOGGER.info(f"Received key: {key}, value: {value}") + self.result_queue.put((key, value)) + else: + LOGGER.info(f"Skipping message with unmatched key: {key}") + # value = json.loads(msg.value().decode('utf-8')) # Added for debugging + # self.result_queue.put((filter_key, value)) # Added for debugging + except Exception as e: + LOGGER.error(f"Error processing Kafka message: {e}") + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def StopAnalyzer(self, + request : AnalyzerId, grpc_context: grpc.ServicerContext # type: ignore + ) -> Empty: # type: ignore + LOGGER.info ("At Service gRPC message: {:}".format(request)) + try: + analyzer_id_to_delete = request.analyzer_id.uuid + self.db_obj.delete_db_row_by_id( + AnalyzerModel, "analyzer_id", analyzer_id_to_delete + ) + self.PublishStopRequestOnKafka(analyzer_id_to_delete) + except Exception as e: + LOGGER.error('Unable to delete analyzer. Error: {:}'.format(e)) + return Empty() + + def PublishStopRequestOnKafka(self, analyzer_uuid): + """ + Method to generate stop analyzer request on Kafka. 
+ """ + # analyzer_uuid = analyzer_id.analyzer_id.uuid + analyzer_to_stop : Dict = { + "algo_name" : None, + "input_kpis" : [], + "output_kpis" : [], + "oper_mode" : None + } + self.kafka_producer.produce( + KafkaTopic.ANALYTICS_REQUEST.value, + key = analyzer_uuid, + value = json.dumps(analyzer_to_stop), + callback = self.delivery_callback + ) + LOGGER.info("Analyzer Stop Request Generated: Analyzer Id: {:}".format(analyzer_uuid)) + self.kafka_producer.flush() + self.StopListener() + + def StopListener(self): + """ + Gracefully stop the Kafka listener and the scheduler. + """ + LOGGER.info("Stopping Kafka listener...") + self.scheduler.shutdown() + LOGGER.info("Kafka listener stopped.") + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def SelectAnalyzers(self, + filter : AnalyzerFilter, contextgrpc_context: grpc.ServicerContext # type: ignore + ) -> AnalyzerList: # type: ignore + LOGGER.info("At Service gRPC message: {:}".format(filter)) + response = AnalyzerList() + try: + rows = self.db_obj.select_with_filter(AnalyzerModel, filter) + try: + for row in rows: + response.analyzer_list.append( + AnalyzerModel.ConvertRowToAnalyzer(row) + ) + return response + except Exception as e: + LOGGER.info('Unable to process filter response {:}'.format(e)) + except Exception as e: + LOGGER.error('Unable to apply filter on table {:}. ERROR: {:}'.format(AnalyzerModel.__name__, e)) + + + def delivery_callback(self, err, msg): + if err: + LOGGER.debug('Message delivery failed: {:}'.format(err)) + print ('Message delivery failed: {:}'.format(err)) + # else: + # LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) + # print('Message delivered to topic {:}'.format(msg.topic())) diff --git a/src/analytics/frontend/service/__init__.py b/src/analytics/frontend/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/service/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/analytics/frontend/service/__main__.py b/src/analytics/frontend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c331844f45d98095ef98951f3db43a0e2f0c69c --- /dev/null +++ b/src/analytics/frontend/service/__main__.py @@ -0,0 +1,56 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port +from .AnalyticsFrontendService import AnalyticsFrontendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + grpc_service = AnalyticsFrontendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/analytics/frontend/tests/__init__.py b/src/analytics/frontend/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/analytics/frontend/tests/messages.py b/src/analytics/frontend/tests/messages.py new file mode 100644 index 0000000000000000000000000000000000000000..646de962e8a213582fdb7cd1446ab57bda561a96 --- /dev/null +++ b/src/analytics/frontend/tests/messages.py @@ -0,0 +1,84 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
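For reference, PublishStartRequestOnKafka in the servicer above publishes a record keyed by the analyzer UUID whose value is the JSON encoding of a dictionary of the following shape (the concrete values here are illustrative only):

    analyzer_to_generate = {
        "algo_name"     : "some_algorithm_name",
        "input_kpis"    : ["6e22f180-ba28-4641-b190-2287bf448888"],
        "output_kpis"   : [],
        "oper_mode"     : 2,                      # numeric AnalyzerOperationMode value
        "thresholds"    : {"avg_value": [20, 30]},
        "window_size"   : "60 seconds",
        "window_slider" : "30 seconds",
    }

The corresponding stop request reuses the same key but carries algo_name and oper_mode set to None, which the backend RequestListener interprets as a termination command for that analyzer.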
+ +import uuid +import json +from common.proto.kpi_manager_pb2 import KpiId +from common.proto.analytics_frontend_pb2 import ( AnalyzerOperationMode, AnalyzerId, + Analyzer, AnalyzerFilter ) + +def create_analyzer_id(): + _create_analyzer_id = AnalyzerId() + # _create_analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) + _create_analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6" + return _create_analyzer_id + +def create_analyzer(): + _create_analyzer = Analyzer() + # _create_analyzer.analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) + _create_analyzer.analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6" + _create_analyzer.algorithm_name = "Test_Aggergate_and_Threshold" + _create_analyzer.operation_mode = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING + + _kpi_id = KpiId() + # input IDs to analyze + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf448888" + _create_analyzer.input_kpi_ids.append(_kpi_id) + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _kpi_id.kpi_id.uuid = "1e22f180-ba28-4641-b190-2287bf446666" + _create_analyzer.input_kpi_ids.append(_kpi_id) + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_analyzer.input_kpi_ids.append(_kpi_id) + # output IDs after analysis + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_analyzer.output_kpi_ids.append(_kpi_id) + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_analyzer.output_kpi_ids.append(_kpi_id) + # parameter + _threshold_dict = { + # 'avg_value' :(20, 30), 'min_value' :(00, 10), 'max_value' :(45, 50), + 'first_value' :(00, 10), 'last_value' :(40, 50), 'stdev_value':(00, 10)} + _create_analyzer.parameters['thresholds'] = json.dumps(_threshold_dict) + _create_analyzer.parameters['window_size'] = "60 seconds" # Such as "10 seconds", "2 minutes", "3 hours", "4 days" or "5 weeks" + _create_analyzer.parameters['window_slider'] = "30 seconds" # should be less than window size + _create_analyzer.parameters['store_aggregate'] = str(False) # TRUE to store. 
No implemented yet + + return _create_analyzer + +def create_analyzer_filter(): + _create_analyzer_filter = AnalyzerFilter() + + _analyzer_id_obj = AnalyzerId() + # _analyzer_id_obj.analyzer_id.uuid = str(uuid.uuid4()) + _analyzer_id_obj.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6" + _create_analyzer_filter.analyzer_id.append(_analyzer_id_obj) + + _create_analyzer_filter.algorithm_names.append('Test_Aggergate_and_Threshold') + + # _input_kpi_id_obj = KpiId() + # _input_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) + # _create_analyzer_filter.input_kpi_ids.append(_input_kpi_id_obj) + # another input kpi Id + # _input_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) + # _create_analyzer_filter.input_kpi_ids.append(_input_kpi_id_obj) + + # _output_kpi_id_obj = KpiId() + # _output_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) + # _create_analyzer_filter.output_kpi_ids.append(_output_kpi_id_obj) + # # another output kpi Id + # _output_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) + # _create_analyzer_filter.input_kpi_ids.append(_output_kpi_id_obj) + + return _create_analyzer_filter diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py new file mode 100644 index 0000000000000000000000000000000000000000..d2428c01fb021f71a884d9a99c446bfef6e66559 --- /dev/null +++ b/src/analytics/frontend/tests/test_frontend.py @@ -0,0 +1,134 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
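Note that the keys of the 'thresholds' parameter built above double as the operation selector: the backend strips the '_value' suffix from each key to derive the aggregation list, so the keys must match the aliases produced by GetAggregations. A small sketch of that derivation, using the dictionary from create_analyzer:

    thresholds = {'first_value': (0, 10), 'last_value': (40, 50), 'stdev_value': (0, 10)}
    oper_list  = [k.replace('_value', '') for k in thresholds]   # ['first', 'last', 'stdev']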
+ +import os +import time +import json +import pytest +import logging +import threading + +from common.Constants import ServiceNameEnum +from common.proto.context_pb2 import Empty +from common.Settings import ( get_service_port_grpc, get_env_var_name, + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC ) + +from common.tools.kafka.Variables import KafkaTopic +from common.proto.analytics_frontend_pb2 import AnalyzerId, AnalyzerList +from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient +from analytics.frontend.service.AnalyticsFrontendService import AnalyticsFrontendService +from analytics.frontend.tests.messages import ( create_analyzer_id, create_analyzer, + create_analyzer_filter ) +from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.interval import IntervalTrigger + + +########################### +# Tests Setup +########################### + +LOCAL_HOST = '127.0.0.1' + +ANALYTICS_FRONTEND_PORT = str(get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND)) +os.environ[get_env_var_name(ServiceNameEnum.ANALYTICSFRONTEND, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.ANALYTICSFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(ANALYTICS_FRONTEND_PORT) + +LOGGER = logging.getLogger(__name__) + +@pytest.fixture(scope='session') +def analyticsFrontend_service(): + LOGGER.info('Initializing AnalyticsFrontendService...') + + _service = AnalyticsFrontendService() + _service.start() + + # yield the server, when test finishes, execution will resume to stop it + LOGGER.info('Yielding AnalyticsFrontendService...') + yield _service + + LOGGER.info('Terminating AnalyticsFrontendService...') + _service.stop() + + LOGGER.info('Terminated AnalyticsFrontendService...') + +@pytest.fixture(scope='session') +def analyticsFrontend_client(analyticsFrontend_service : AnalyticsFrontendService): + LOGGER.info('Initializing AnalyticsFrontendClient...') + + _client = AnalyticsFrontendClient() + + # yield the server, when test finishes, execution will resume to stop it + LOGGER.info('Yielding AnalyticsFrontendClient...') + yield _client + + LOGGER.info('Closing AnalyticsFrontendClient...') + _client.close() + + LOGGER.info('Closed AnalyticsFrontendClient...') + + +########################### +# Tests Implementation of Analytics Frontend +########################### + +# --- "test_validate_kafka_topics" should be executed before the functionality tests --- +def test_validate_kafka_topics(): + LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") + response = KafkaTopic.create_all_topics() + assert isinstance(response, bool) + +# ----- core funtionality test ----- +# def test_StartAnalytics(analyticsFrontend_client): +# LOGGER.info(' >>> test_StartAnalytic START: <<< ') +# response = analyticsFrontend_client.StartAnalyzer(create_analyzer()) +# LOGGER.debug(str(response)) +# assert isinstance(response, AnalyzerId) + +# To test start and stop listener together +def test_StartStopAnalyzers(analyticsFrontend_client): + LOGGER.info(' >>> test_StartStopAnalyzers START: <<< ') + LOGGER.info('--> StartAnalyzer') + added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer()) + LOGGER.debug(str(added_analyzer_id)) + LOGGER.info(' --> Calling StartResponseListener... 
')
+    class_obj = AnalyticsFrontendServiceServicerImpl()
+    response = class_obj.StartResponseListener(added_analyzer_id.analyzer_id.uuid)
+    LOGGER.debug(response)
+    LOGGER.info("waiting for timer to complete ...")
+    time.sleep(3)
+    LOGGER.info('--> StopAnalyzer')
+    response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id)
+    LOGGER.debug(str(response))
+
+# def test_SelectAnalytics(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_SelectAnalytics START: <<< ')
+#     response = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, AnalyzerList)
+
+# def test_StopAnalytic(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_StopAnalytic START: <<< ')
+#     response = analyticsFrontend_client.StopAnalyzer(create_analyzer_id())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, Empty)
+
+# def test_ResponseListener():
+#     LOGGER.info(' >>> test_ResponseListener START <<< ')
+#     analyzer_id = create_analyzer_id()
+#     LOGGER.debug("Starting Response Listener for Analyzer ID: {:}".format(analyzer_id.analyzer_id.uuid))
+#     class_obj = AnalyticsFrontendServiceServicerImpl()
+#     for response in class_obj.StartResponseListener(analyzer_id.analyzer_id.uuid):
+#         LOGGER.debug(response)
+#         assert isinstance(response, tuple)
\ No newline at end of file
diff --git a/src/analytics/requirements.in b/src/analytics/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..8ff30ddaad25c39713f2e6f68c8d9aebed74dad0
--- /dev/null
+++ b/src/analytics/requirements.in
@@ -0,0 +1,21 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+java==11.0.*
+pyspark==3.5.2
+confluent-kafka==2.3.*
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/analytics/tests/__init__.py b/src/analytics/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/analytics/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
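test_frontend.py above imports APScheduler's BackgroundScheduler and IntervalTrigger, and test_StartStopAnalyzers sleeps a few seconds "waiting for timer to complete", which suggests the servicer's response listener runs as a periodic background job. A self-contained sketch of that pattern follows; the poll function and the one-second interval are assumptions for illustration, not the servicer's actual implementation.

import time
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger

def poll_analyzer_responses():
    # Placeholder for the real work, e.g. reading analyzer results from a response topic.
    print('polling analyzer responses ...')

scheduler = BackgroundScheduler()
scheduler.add_job(poll_analyzer_responses, trigger=IntervalTrigger(seconds=1), id='analyzer_response_poller')
scheduler.start()
time.sleep(3)                    # let a few polls run, mirroring the wait used in the test above
scheduler.shutdown(wait=True)    # stop the background job cleanly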
+ diff --git a/src/analytics/tests/test_analytics_db.py b/src/analytics/tests/test_analytics_db.py new file mode 100644 index 0000000000000000000000000000000000000000..58e7d0167044bb461e66b053dcb3999641ea8419 --- /dev/null +++ b/src/analytics/tests/test_analytics_db.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from analytics.database.Analyzer_DB import AnalyzerDB + +LOGGER = logging.getLogger(__name__) + +def test_verify_databases_and_tables(): + LOGGER.info('>>> test_verify_databases_and_tables : START <<< ') + AnalyzerDBobj = AnalyzerDB() + # AnalyzerDBobj.drop_database() + # AnalyzerDBobj.verify_tables() + AnalyzerDBobj.create_database() + AnalyzerDBobj.create_tables() + AnalyzerDBobj.verify_tables() diff --git a/src/common/Constants.py b/src/common/Constants.py index 767b21343f89e35c2338b522bcdc71c56aca1815..74490321f9c8ec016fa4b48b583e2217c61710ec 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -65,6 +65,9 @@ class ServiceNameEnum(Enum): KPIVALUEAPI = 'kpi-value-api' KPIVALUEWRITER = 'kpi-value-writer' TELEMETRYFRONTEND = 'telemetry-frontend' + TELEMETRYBACKEND = 'telemetry-backend' + ANALYTICSFRONTEND = 'analytics-frontend' + ANALYTICSBACKEND = 'analytics-backend' # Used for test and debugging only DLT_GATEWAY = 'dltgateway' @@ -98,6 +101,9 @@ DEFAULT_SERVICE_GRPC_PORTS = { ServiceNameEnum.KPIVALUEAPI .value : 30020, ServiceNameEnum.KPIVALUEWRITER .value : 30030, ServiceNameEnum.TELEMETRYFRONTEND .value : 30050, + ServiceNameEnum.TELEMETRYBACKEND .value : 30060, + ServiceNameEnum.ANALYTICSFRONTEND .value : 30080, + ServiceNameEnum.ANALYTICSBACKEND .value : 30090, # Used for test and debugging only ServiceNameEnum.DLT_GATEWAY .value : 50051, diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py index 9ed321d5328aa17a856a3a6401bc35576eef679f..23ebe19d681bd0ba774c8f3f4435c233369d0e28 100644 --- a/src/common/DeviceTypes.py +++ b/src/common/DeviceTypes.py @@ -47,6 +47,7 @@ class DeviceTypeEnum(Enum): PACKET_ROUTER = 'packet-router' PACKET_SWITCH = 'packet-switch' XR_CONSTELLATION = 'xr-constellation' + QKD_NODE = 'qkd-node' # ETSI TeraFlowSDN controller TERAFLOWSDN_CONTROLLER = 'teraflowsdn' diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py index 71b3999bf6e42c3cd9130747af2cdcbe2d9a570e..d86a769ef8f2ab120b42d0b12f93530e8c71c2a3 100644 --- a/src/common/method_wrappers/Decorator.py +++ b/src/common/method_wrappers/Decorator.py @@ -235,3 +235,35 @@ def safe_and_metered_rpc_method(metrics_pool : MetricsPool, logger : logging.Log grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) return inner_wrapper return outer_wrapper + +def safe_and_metered_rpc_method_async(metrics_pool: MetricsPool, logger: logging.Logger): + def outer_wrapper(func): + method_name = func.__name__ + metrics = metrics_pool.get_metrics(method_name) + histogram_duration, counter_started, counter_completed, counter_failed = 
metrics + + async def inner_wrapper(self, request, grpc_context: grpc.aio.ServicerContext): + counter_started.inc() + try: + logger.debug('{:s} request: {:s}'.format(method_name, grpc_message_to_json_string(request))) + reply = await func(self, request, grpc_context) + logger.debug('{:s} reply: {:s}'.format(method_name, grpc_message_to_json_string(reply))) + counter_completed.inc() + return reply + except ServiceException as e: # pragma: no cover (ServiceException not thrown) + if e.code not in [grpc.StatusCode.NOT_FOUND, grpc.StatusCode.ALREADY_EXISTS]: + # Assume not found or already exists is just a condition, not an error + logger.exception('{:s} exception'.format(method_name)) + counter_failed.inc() + else: + counter_completed.inc() + await grpc_context.abort(e.code, e.details) + except Exception as e: # pragma: no cover, pylint: disable=broad-except + logger.exception('{:s} exception'.format(method_name)) + counter_failed.inc() + await grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) + + return inner_wrapper + + return outer_wrapper + diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py index 24ae2cff7b5e710e18999eb09029216a4a5d6c8a..fc43c315114e7b51c4e2604afbb14e165796e7c5 100644 --- a/src/common/tools/kafka/Variables.py +++ b/src/common/tools/kafka/Variables.py @@ -14,23 +14,40 @@ import logging from enum import Enum -from confluent_kafka import KafkaException from confluent_kafka.admin import AdminClient, NewTopic +from common.Settings import get_setting LOGGER = logging.getLogger(__name__) +KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}' class KafkaConfig(Enum): - # SERVER_IP = "127.0.0.1:9092" - SERVER_IP = "kafka-service.kafka.svc.cluster.local:9092" - ADMIN_CLIENT = AdminClient({'bootstrap.servers': SERVER_IP}) + + @staticmethod + def get_kafka_address() -> str: + # kafka_server_address = get_setting('KFK_SERVER_ADDRESS', default=None) + # if kafka_server_address is None: + KFK_NAMESPACE = get_setting('KFK_NAMESPACE') + KFK_PORT = get_setting('KFK_SERVER_PORT') + kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT) + return kafka_server_address + + @staticmethod + def get_admin_client(): + SERVER_ADDRESS = KafkaConfig.get_kafka_address() + ADMIN_CLIENT = AdminClient({'bootstrap.servers': SERVER_ADDRESS }) + return ADMIN_CLIENT + class KafkaTopic(Enum): - REQUEST = 'topic_request' - RESPONSE = 'topic_response' - RAW = 'topic_raw' - LABELED = 'topic_labeled' - VALUE = 'topic_value' + # TODO: Later to be populated from ENV variable. 
+ REQUEST = 'topic_request' + RESPONSE = 'topic_response' + RAW = 'topic_raw' + LABELED = 'topic_labeled' + VALUE = 'topic_value' + ANALYTICS_REQUEST = 'topic_request_analytics' + ANALYTICS_RESPONSE = 'topic_response_analytics' @staticmethod def create_all_topics() -> bool: @@ -38,8 +55,9 @@ class KafkaTopic(Enum): Method to create Kafka topics defined as class members """ all_topics = [member.value for member in KafkaTopic] + LOGGER.debug("Kafka server address is: {:} ".format(KafkaConfig.get_kafka_address())) if( KafkaTopic.create_new_topic_if_not_exists( all_topics )): - LOGGER.debug("All topics are created sucsessfully") + LOGGER.debug("All topics are created sucsessfully or Already Exists") return True else: LOGGER.debug("Error creating all topics") @@ -55,14 +73,14 @@ class KafkaTopic(Enum): LOGGER.debug("Topics names to be verified and created: {:}".format(new_topics)) for topic in new_topics: try: - topic_metadata = KafkaConfig.ADMIN_CLIENT.value.list_topics(timeout=5) + topic_metadata = KafkaConfig.get_admin_client().list_topics(timeout=5) # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics)) if topic not in topic_metadata.topics: # If the topic does not exist, create a new topic print("Topic {:} does not exist. Creating...".format(topic)) LOGGER.debug("Topic {:} does not exist. Creating...".format(topic)) new_topic = NewTopic(topic, num_partitions=1, replication_factor=1) - KafkaConfig.ADMIN_CLIENT.value.create_topics([new_topic]) + KafkaConfig.get_admin_client().create_topics([new_topic]) else: print("Topic name already exists: {:}".format(topic)) LOGGER.debug("Topic name already exists: {:}".format(topic)) diff --git a/src/common/tools/service/GenericGrpcServiceAsync.py b/src/common/tools/service/GenericGrpcServiceAsync.py new file mode 100644 index 0000000000000000000000000000000000000000..488d861777ee7200fc4331449f21dded6b2f6dac --- /dev/null +++ b/src/common/tools/service/GenericGrpcServiceAsync.py @@ -0,0 +1,72 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
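With the KafkaConfig changes above, the broker address is derived from the KFK_NAMESPACE and KFK_SERVER_PORT environment variables instead of a hard-coded constant. A minimal sketch of how a component could publish to one of the new analytics topics using these helpers, assuming both variables are set in the component's environment and confluent-kafka is installed; the key and value shown are placeholders.

from confluent_kafka import Producer
from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

# Resolves to 'kafka-service.<KFK_NAMESPACE>.svc.cluster.local:<KFK_SERVER_PORT>'
bootstrap_servers = KafkaConfig.get_kafka_address()

producer = Producer({'bootstrap.servers': bootstrap_servers})
producer.produce(KafkaTopic.ANALYTICS_REQUEST.value, key='analyzer-uuid', value='{"algorithm_name": "..."}')
producer.flush()    # block until the message is delivered (or an error is reported)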
+ +from typing import Optional, Union +import grpc +import logging +from concurrent import futures +from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH +from grpc_health.v1.health_pb2 import HealthCheckResponse +from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server +from common.Settings import get_grpc_bind_address, get_grpc_grace_period, get_grpc_max_workers + +class GenericGrpcServiceAsync: + def __init__( + self, bind_port: Union[str, int], bind_address: Optional[str] = None, max_workers: Optional[int] = None, + grace_period: Optional[int] = None, enable_health_servicer: bool = True, cls_name: str = __name__ + ) -> None: + self.logger = logging.getLogger(cls_name) + self.bind_port = bind_port + self.bind_address = get_grpc_bind_address() if bind_address is None else bind_address + self.max_workers = get_grpc_max_workers() if max_workers is None else max_workers + self.grace_period = get_grpc_grace_period() if grace_period is None else grace_period + self.enable_health_servicer = enable_health_servicer + self.endpoint = None + self.health_servicer = None + self.pool = None + self.server = None + + async def install_servicers(self): + pass + + async def start(self): + self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port)) + self.logger.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format( + str(self.endpoint), str(self.max_workers))) + + self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers) + self.server = grpc.aio.server(self.pool) + + await self.install_servicers() # Ensure this is awaited + + if self.enable_health_servicer: + self.health_servicer = HealthServicer( + experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1)) + add_HealthServicer_to_server(self.health_servicer, self.server) + + self.bind_port = self.server.add_insecure_port(self.endpoint) + self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port)) + self.logger.info('Listening on {:s}...'.format(str(self.endpoint))) + await self.server.start() + if self.enable_health_servicer: + self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) + + self.logger.debug('Service started') + + async def stop(self): + self.logger.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period))) + if self.enable_health_servicer: + self.health_servicer.enter_graceful_shutdown() + await self.server.stop(self.grace_period) + self.logger.debug('Service stopped') diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py index 06d73177036d8bfe8b6f72d6c97b03f78aaf7531..cf900ed6df3b4699a4e56f53873174ddd997cd53 100644 --- a/src/context/service/database/models/enums/DeviceDriver.py +++ b/src/context/service/database/models/enums/DeviceDriver.py @@ -34,6 +34,7 @@ class ORM_DeviceDriverEnum(enum.Enum): OPTICAL_TFS = DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS IETF_ACTN = DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN OC = DeviceDriverEnum.DEVICEDRIVER_OC + QKD = DeviceDriverEnum.DEVICEDRIVER_QKD grpc_to_enum__device_driver = functools.partial( grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py index 62d5380b56803b3cc21dd1456292ec9df470cb15..cd6819999a585b541ef716fa481910ae3b53ecbf 100644 --- a/src/context/service/database/models/enums/ServiceType.py +++ 
b/src/context/service/database/models/enums/ServiceType.py @@ -29,6 +29,7 @@ class ORM_ServiceTypeEnum(enum.Enum): TE = ServiceTypeEnum.SERVICETYPE_TE E2E = ServiceTypeEnum.SERVICETYPE_E2E OPTICAL_CONNECTIVITY = ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY + QKD = ServiceTypeEnum.SERVICETYPE_QKD grpc_to_enum__service_type = functools.partial( grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum) diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index cb6158b965a21c974605a27582340620f368bdb9..a5e7f377113342b98203a23a426540f6188f784e 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -178,3 +178,14 @@ if LOAD_ALL_DEVICE_DRIVERS: FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_OC, } ])) + +if LOAD_ALL_DEVICE_DRIVERS: + from .qkd.QKDDriver2 import QKDDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (QKDDriver, [ + { + # Close enough, it does optical switching + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.QKD_NODE, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_QKD, + } + ])) diff --git a/src/device/service/drivers/qkd/QKDDriver.py b/src/device/service/drivers/qkd/QKDDriver.py new file mode 100644 index 0000000000000000000000000000000000000000..a49144d6fc0840498c5f5f1267d1cef25cb1177a --- /dev/null +++ b/src/device/service/drivers/qkd/QKDDriver.py @@ -0,0 +1,168 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, requests, threading +from requests.auth import HTTPBasicAuth +from typing import Any, Iterator, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.type_checkers.Checkers import chk_string, chk_type +from device.service.driver_api._Driver import _Driver +from . 
import ALL_RESOURCE_KEYS +from .Tools import find_key, config_getter, create_connectivity_link + +LOGGER = logging.getLogger(__name__) + +DRIVER_NAME = 'qkd' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) + + +class QKDDriver(_Driver): + def __init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) + self.__lock = threading.Lock() + self.__started = threading.Event() + self.__terminate = threading.Event() + username = self.settings.get('username') + password = self.settings.get('password') + self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None + scheme = self.settings.get('scheme', 'http') + self.__qkd_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port)) + self.__timeout = int(self.settings.get('timeout', 120)) + self.__node_ids = set(self.settings.get('node_ids', [])) + token = self.settings.get('token') + self.__headers = {'Authorization': 'Bearer ' + token} + self.__initial_data = None + + def Connect(self) -> bool: + url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node' + with self.__lock: + if self.__started.is_set(): return True + r = None + try: + LOGGER.info(f'requests.get("{url}", timeout={self.__timeout}, verify=False, auth={self.__auth}, headers={self.__headers})') + r = requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth, headers=self.__headers) + LOGGER.info(f'R: {r}') + LOGGER.info(f'Text: {r.text}') + LOGGER.info(f'Json: {r.json()}') + except requests.exceptions.Timeout: + LOGGER.exception('Timeout connecting {:s}'.format(str(self.__qkd_root))) + return False + except Exception: # pylint: disable=broad-except + LOGGER.exception('Exception connecting {:s}'.format(str(self.__qkd_root))) + return False + else: + self.__started.set() + self.__initial_data = r.json() + return True + + def Disconnect(self) -> bool: + with self.__lock: + self.__terminate.set() + return True + + @metered_subclass_method(METRICS_POOL) + def GetInitialConfig(self) -> List[Tuple[str, Any]]: + with self.__lock: + return self.__initial_data + + @metered_subclass_method(METRICS_POOL) + def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: + chk_type('resources', resource_keys, list) + results = [] + with self.__lock: + if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS + for i, resource_key in enumerate(resource_keys): + str_resource_name = 'resource_key[#{:d}]'.format(i) + chk_string(str_resource_name, resource_key, allow_empty=False) + results.extend(config_getter( + self.__qkd_root, resource_key, timeout=self.__timeout, auth=self.__auth, + node_ids=self.__node_ids, headers=self.__headers)) + return results + + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + results = [] + if len(resources) == 0: + return results + with self.__lock: + for resource_key, resource_value in resources: + LOGGER.info('resource = {:s}'.format(str(resource_key))) + + if resource_key.startswith('/link'): + try: + resource_value = json.loads(resource_value) + link_uuid = resource_value['uuid'] + + node_id_src = resource_value['src_qkdn_id'] + interface_id_src = resource_value['src_interface_id'] + node_id_dst = resource_value['dst_qkdn_id'] + interface_id_dst = resource_value['dst_interface_id'] + virt_prev_hop = resource_value.get('virt_prev_hop') + virt_next_hops = 
resource_value.get('virt_next_hops') + virt_bandwidth = resource_value.get('virt_bandwidth') + + + data = create_connectivity_link( + self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst, + virt_prev_hop, virt_next_hops, virt_bandwidth, + timeout=self.__timeout, auth=self.__auth, headers=self.__headers + ) + + #data = create_connectivity_link( + # self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst, + # timeout=self.__timeout, auth=self.__auth + #) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unhandled error processing resource_key({:s})'.format(str(resource_key))) + results.append(e) + else: + results.append(True) + + LOGGER.info('Test keys: ' + str([x for x,y in resources])) + LOGGER.info('Test values: ' + str(results)) + return results + + ''' + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + results = [] + if len(resources) == 0: return results + with self.__lock: + for resource in resources: + LOGGER.info('resource = {:s}'.format(str(resource))) + uuid = find_key(resource, 'uuid') + results.extend(delete_connectivity_service( + self.__qkd_root, uuid, timeout=self.__timeout, auth=self.__auth)) + return results + ''' + + @metered_subclass_method(METRICS_POOL) + def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + # TODO: QKD API Driver does not support monitoring by now + LOGGER.info(f'Subscribe {self.address}: {subscriptions}') + return [True for _ in subscriptions] + + @metered_subclass_method(METRICS_POOL) + def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + # TODO: QKD API Driver does not support monitoring by now + return [False for _ in subscriptions] + + def GetState( + self, blocking=False, terminate : Optional[threading.Event] = None + ) -> Iterator[Tuple[float, str, Any]]: + # TODO: QKD API Driver does not support monitoring by now + LOGGER.info(f'GetState {self.address} called') + return [] diff --git a/src/device/service/drivers/qkd/QKDDriver2.py b/src/device/service/drivers/qkd/QKDDriver2.py new file mode 100644 index 0000000000000000000000000000000000000000..c73a83141d92955d01a6a00912389b671fe7ef98 --- /dev/null +++ b/src/device/service/drivers/qkd/QKDDriver2.py @@ -0,0 +1,216 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
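Both QKD driver variants (QKDDriver.py above and QKDDriver2.py below) implement the same _Driver interface: Connect(), GetConfig(), SetConfig(), Disconnect(). A hedged usage sketch against QKDDriver2 is given here; the address, port and bearer token are placeholder values, not defaults shipped with this patch.

from device.service.drivers.qkd.QKDDriver2 import QKDDriver

# Placeholder endpoint and JWT; a real deployment supplies the QKD node address, port and token.
driver = QKDDriver(address='10.0.2.10', port=11111, headers={'Authorization': 'Bearer <jwt-token>'})

if driver.Connect():
    # With no explicit keys, GetConfig() walks ALL_RESOURCE_KEYS
    # (endpoints, interfaces, network instances, links, apps, capabilities, node).
    for resource_key, resource_value in driver.GetConfig():
        print(resource_key, resource_value)
    driver.Disconnect()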
+ +import os +import json +import logging +import requests +import threading +from requests.auth import HTTPBasicAuth +from typing import Any, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.type_checkers.Checkers import chk_string, chk_type +from device.service.driver_api._Driver import _Driver +from .Tools2 import config_getter, create_connectivity_link +from device.service.driver_api._Driver import _Driver +from . import ALL_RESOURCE_KEYS + +LOGGER = logging.getLogger(__name__) + +DRIVER_NAME = 'qkd' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) + + +class QKDDriver(_Driver): + def __init__(self, address: str, port: int, **settings) -> None: + LOGGER.info(f"Initializing QKDDriver with address={address}, port={port}, settings={settings}") + super().__init__(DRIVER_NAME, address, port, **settings) + self.__lock = threading.Lock() + self.__started = threading.Event() + self.__terminate = threading.Event() + self.__auth = None + self.__headers = {} + self.__qkd_root = os.getenv('QKD_API_URL', f"http://{self.address}:{self.port}") # Simplified URL management + self.__timeout = int(self.settings.get('timeout', 120)) + self.__node_ids = set(self.settings.get('node_ids', [])) + self.__initial_data = None + + # Optionally pass headers for authentication (e.g., JWT) + self.__headers = settings.get('headers', {}) + self.__auth = settings.get('auth', None) + + LOGGER.info(f"QKDDriver initialized with QKD root URL: {self.__qkd_root}") + + def Connect(self) -> bool: + url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node' + with self.__lock: + LOGGER.info(f"Starting connection to {url}") + if self.__started.is_set(): + LOGGER.info("Already connected, skipping re-connection.") + return True + + try: + LOGGER.info(f'Attempting to connect to {url} with headers {self.__headers} and timeout {self.__timeout}') + response = requests.get(url, timeout=self.__timeout, verify=False, headers=self.__headers, auth=self.__auth) + LOGGER.info(f'Received response: {response.status_code}, content: {response.text}') + response.raise_for_status() + self.__initial_data = response.json() + self.__started.set() + LOGGER.info('Connection successful') + return True + except requests.exceptions.RequestException as e: + LOGGER.error(f'Connection failed: {e}') + return False + + def Disconnect(self) -> bool: + LOGGER.info("Disconnecting QKDDriver") + with self.__lock: + self.__terminate.set() + LOGGER.info("QKDDriver disconnected successfully") + return True + + @metered_subclass_method(METRICS_POOL) + def GetInitialConfig(self) -> List[Tuple[str, Any]]: + LOGGER.info("Getting initial configuration") + with self.__lock: + if isinstance(self.__initial_data, dict): + initial_config = [('qkd_node', self.__initial_data.get('qkd_node', {}))] + LOGGER.info(f"Initial configuration: {initial_config}") + return initial_config + LOGGER.warning("Initial data is not a dictionary") + return [] + + @metered_subclass_method(METRICS_POOL) + def GetConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: + chk_type('resources', resource_keys, list) + LOGGER.info(f"Getting configuration for resource_keys: {resource_keys}") + results = [] + with self.__lock: + if not resource_keys: + resource_keys = ALL_RESOURCE_KEYS + for i, resource_key in enumerate(resource_keys): + chk_string(f'resource_key[{i}]', resource_key, allow_empty=False) + LOGGER.info(f"Retrieving resource key: 
{resource_key}") + resource_results = config_getter( + self.__qkd_root, resource_key, timeout=self.__timeout, headers=self.__headers, auth=self.__auth) + results.extend(resource_results) + LOGGER.info(f"Resource results for {resource_key}: {resource_results}") + LOGGER.info(f"Final configuration results: {results}") + return results + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + results = [] + if len(resources) == 0: + return results + + with self.__lock: + for resource_key, resource_value in resources: + LOGGER.info('Processing resource_key = {:s}'.format(str(resource_key))) + + # Only process '/link' keys + if resource_key.startswith('/link'): + try: + # Ensure resource_value is deserialized + if isinstance(resource_value, str): + resource_value = json.loads(resource_value) + + # Extract values from resource_value dictionary + link_uuid = resource_value['uuid'] + node_id_src = resource_value['src_qkdn_id'] + interface_id_src = resource_value['src_interface_id'] + node_id_dst = resource_value['dst_qkdn_id'] + interface_id_dst = resource_value['dst_interface_id'] + virt_prev_hop = resource_value.get('virt_prev_hop') + virt_next_hops = resource_value.get('virt_next_hops') + virt_bandwidth = resource_value.get('virt_bandwidth') + + # Call create_connectivity_link with the extracted values + LOGGER.info(f"Creating connectivity link with UUID: {link_uuid}") + data = create_connectivity_link( + self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst, + virt_prev_hop, virt_next_hops, virt_bandwidth, + timeout=self.__timeout, auth=self.__auth + ) + + # Append success result + results.append(True) + LOGGER.info(f"Connectivity link {link_uuid} created successfully") + + except Exception as e: + # Catch and log any unhandled exceptions + LOGGER.exception(f'Unhandled error processing resource_key({resource_key})') + results.append(e) + else: + # Skip unsupported resource keys and append success + results.append(True) + + # Logging test results + LOGGER.info('Test keys: ' + str([x for x,y in resources])) + LOGGER.info('Test values: ' + str(results)) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + LOGGER.info(f"Deleting configuration for resources: {resources}") + results = [] + if not resources: + LOGGER.warning("No resources provided for DeleteConfig") + return results + with self.__lock: + for resource in resources: + LOGGER.info(f'Resource to delete: {resource}') + uuid = resource[1].get('uuid') + if uuid: + LOGGER.info(f'Resource with UUID {uuid} deleted successfully') + results.append(True) + else: + LOGGER.warning(f"UUID not found in resource: {resource}") + results.append(False) + LOGGER.info(f"DeleteConfig results: {results}") + return results + + @metered_subclass_method(METRICS_POOL) + def SubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + LOGGER.info(f"Subscribing to state updates: {subscriptions}") + results = [True for _ in subscriptions] + LOGGER.info(f"Subscription results: {results}") + return results + + @metered_subclass_method(METRICS_POOL) + def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + LOGGER.info(f"Unsubscribing from state updates: {subscriptions}") + results = [True for _ in subscriptions] + LOGGER.info(f"Unsubscription results: {results}") + 
return results + + @metered_subclass_method(METRICS_POOL) + def GetState(self, blocking=False, terminate: Optional[threading.Event] = None) -> Union[dict, list]: + LOGGER.info(f"GetState called with blocking={blocking}, terminate={terminate}") + url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node' + try: + LOGGER.info(f"Making GET request to {url} to retrieve state") + response = requests.get(url, timeout=self.__timeout, verify=False, headers=self.__headers, auth=self.__auth) + LOGGER.info(f"Received state response: {response.status_code}, content: {response.text}") + response.raise_for_status() + state_data = response.json() + LOGGER.info(f"State data retrieved: {state_data}") + return state_data + except requests.exceptions.Timeout: + LOGGER.error(f'Timeout getting state from {self.__qkd_root}') + return [] + except Exception as e: + LOGGER.error(f'Exception getting state from {self.__qkd_root}: {e}') + return [] diff --git a/src/device/service/drivers/qkd/Tools.py b/src/device/service/drivers/qkd/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..c17d01915dcdda55b36317c683fd60524c97239b --- /dev/null +++ b/src/device/service/drivers/qkd/Tools.py @@ -0,0 +1,173 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, requests +from requests.auth import HTTPBasicAuth +from typing import Dict, Optional, Set +from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES +from . 
import RESOURCE_APPS, RESOURCE_LINKS, RESOURCE_CAPABILITES, RESOURCE_NODE + + +LOGGER = logging.getLogger(__name__) + +HTTP_OK_CODES = { + 200, # OK + 201, # Created + 202, # Accepted + 204, # No Content +} + +def find_key(resource, key): + return json.loads(resource[1])[key] + + + +def config_getter( + root_url : str, resource_key : str, auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None, + node_ids : Set[str] = set(), headers={} +): + # getting endpoints + + url = root_url + '/restconf/data/etsi-qkd-sdn-node:qkd_node/' + + + result = [] + + try: + if resource_key in [RESOURCE_ENDPOINTS, RESOURCE_INTERFACES]: + url += 'qkd_interfaces/' + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + interfaces = r.json()['qkd_interfaces']['qkd_interface'] + + # If it's a physical endpoint + if resource_key == RESOURCE_ENDPOINTS: + for interface in interfaces: + resource_value = interface.get('qkdi_att_point', {}) + if 'device' in resource_value and 'port' in resource_value: + uuid = '{}:{}'.format(resource_value['device'], resource_value['port']) + resource_key = '/endpoints/endpoint[{:s}]'.format(uuid) + resource_value['uuid'] = uuid + + sample_types = {} + metric_name = 'KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS' + metric_id = 301 + metric_name = metric_name.lower().replace('kpisampletype_', '') + monitoring_resource_key = '{:s}/state/{:s}'.format(resource_key, metric_name) + sample_types[metric_id] = monitoring_resource_key + + + + resource_value['sample_types'] = sample_types + + + + result.append((resource_key, resource_value)) + else: + for interface in interfaces: + resource_key = '/interface[{:s}]'.format(interface['qkdi_id']) + endpoint_value = interface.get('qkdi_att_point', {}) + + if 'device' in endpoint_value and 'port' in endpoint_value: + name = '{}:{}'.format(endpoint_value['device'], endpoint_value['port']) + interface['name'] = name + interface['enabled'] = True # For test purpose only + + result.append((resource_key, interface)) + + elif resource_key in [RESOURCE_LINKS, RESOURCE_NETWORK_INSTANCES]: + url += 'qkd_links/' + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + links = r.json()['qkd_links']['qkd_link'] + + if resource_key == RESOURCE_LINKS: + for link in links: + link_type = link.get('qkdl_type', 'Direct') + + if link_type == 'Direct': + resource_key = '/link[{:s}]'.format(link['qkdl_id']) + result.append((resource_key, link)) + else: + for link in links: + link_type = link.get('qkdl_type', 'Direct') + + if link_type == 'Virtual': + resource_key = '/service[{:s}]'.format(link['qkdl_id']) + result.append((resource_key, link)) + + elif resource_key == RESOURCE_APPS: + url += 'qkd_applications/' + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + apps = r.json()['qkd_applications']['qkd_app'] + + for app in apps: + resource_key = '/app[{:s}]'.format(app['app_id']) + result.append((resource_key, app)) + + + elif resource_key == RESOURCE_CAPABILITES: + url += 'qkdn_capabilities/' + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + capabilities = r.json()['qkdn_capabilities'] + + result.append((resource_key, capabilities)) + + elif resource_key == RESOURCE_NODE: + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + node = r.json()['qkd_node'] + + result.append((resource_key, node)) + + except requests.exceptions.Timeout: + LOGGER.exception('Timeout connecting {:s}'.format(url)) + except Exception as e: 
# pylint: disable=broad-except + LOGGER.exception('Exception retrieving/parsing endpoints for {:s}'.format(resource_key)) + result.append((resource_key, e)) + + + return result + + + +def create_connectivity_link( + root_url, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst, + virt_prev_hop = None, virt_next_hops = None, virt_bandwidth = None, + auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None, headers={} +): + + url = root_url + '/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_links/' + is_virtual = bool(virt_prev_hop or virt_next_hops) + + qkd_link = { + 'qkdl_id': link_uuid, + 'qkdl_type': 'etsi-qkd-node-types:' + ('VIRT' if is_virtual else 'PHYS'), + 'qkdl_local': { + 'qkdn_id': node_id_src, + 'qkdi_id': interface_id_src + }, + 'qkdl_remote': { + 'qkdn_id': node_id_dst, + 'qkdi_id': interface_id_dst + } + } + + if is_virtual: + qkd_link['virt_prev_hop'] = virt_prev_hop + qkd_link['virt_next_hop'] = virt_next_hops or [] + qkd_link['virt_bandwidth'] = virt_bandwidth + + + data = {'qkd_links': {'qkd_link': [qkd_link]}} + + requests.post(url, json=data, headers=headers) + diff --git a/src/device/service/drivers/qkd/Tools2.py b/src/device/service/drivers/qkd/Tools2.py new file mode 100644 index 0000000000000000000000000000000000000000..c598c7443d276ea0eb76ce761d173de9944c3cfb --- /dev/null +++ b/src/device/service/drivers/qkd/Tools2.py @@ -0,0 +1,272 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +import requests +from typing import Dict, Optional, Set, List, Tuple, Union, Any +from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES +from . import RESOURCE_APPS, RESOURCE_LINKS, RESOURCE_CAPABILITES, RESOURCE_NODE + +LOGGER = logging.getLogger(__name__) + +HTTP_OK_CODES = {200, 201, 202, 204} + +def find_key(resource: Tuple[str, str], key: str) -> Any: + """ + Extracts a specific key from a JSON resource. + """ + return json.loads(resource[1]).get(key) + + +def config_getter( + root_url: str, resource_key: str, auth: Optional[Any] = None, timeout: Optional[int] = None, + node_ids: Set[str] = set(), headers: Dict[str, str] = {} +) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]: + """ + Fetches configuration data from a QKD node for a specified resource key. + Returns a list of tuples containing the resource key and the corresponding data or exception. + The function is agnostic to authentication: headers and auth are passed from external sources. 
+ """ + url = f"{root_url}/restconf/data/etsi-qkd-sdn-node:qkd_node/" + LOGGER.info(f"Fetching configuration for {resource_key} from {root_url}") + + try: + if resource_key in [RESOURCE_ENDPOINTS, RESOURCE_INTERFACES]: + return fetch_interfaces(url, resource_key, headers, auth, timeout) + + elif resource_key in [RESOURCE_LINKS, RESOURCE_NETWORK_INSTANCES]: + return fetch_links(url, resource_key, headers, auth, timeout) + + elif resource_key in [RESOURCE_APPS]: + return fetch_apps(url, resource_key, headers, auth, timeout) + + elif resource_key in [RESOURCE_CAPABILITES]: + return fetch_capabilities(url, resource_key, headers, auth, timeout) + + elif resource_key in [RESOURCE_NODE]: + return fetch_node(url, resource_key, headers, auth, timeout) + + else: + LOGGER.warning(f"Unknown resource key: {resource_key}") + return [(resource_key, ValueError(f"Unknown resource key: {resource_key}"))] + + except requests.exceptions.RequestException as e: + LOGGER.error(f'Error retrieving/parsing {resource_key} from {url}: {e}') + return [(resource_key, e)] + + +def fetch_interfaces(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]: + """ + Fetches interface data from the QKD node. Adapts to both mocked and real QKD data structures. + """ + result = [] + url += 'qkd_interfaces/' + + try: + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + r.raise_for_status() + + # Handle both real and mocked QKD response structures + response_data = r.json() + + if isinstance(response_data.get('qkd_interfaces'), dict): + interfaces = response_data.get('qkd_interfaces', {}).get('qkd_interface', []) + else: + interfaces = response_data.get('qkd_interface', []) + + for interface in interfaces: + if resource_key in [RESOURCE_ENDPOINTS]: + # Handle real QKD data format + resource_value = interface.get('qkdi_att_point', {}) + if 'device' in resource_value and 'port' in resource_value: + uuid = f"{resource_value['device']}:{resource_value['port']}" + resource_key_with_uuid = f"/endpoints/endpoint[{uuid}]" + resource_value['uuid'] = uuid + + # Add sample types (for demonstration purposes) + sample_types = {} + metric_name = 'KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS' + metric_id = 301 + metric_name = metric_name.lower().replace('kpisampletype_', '') + monitoring_resource_key = '{:s}/state/{:s}'.format(resource_key, metric_name) + sample_types[metric_id] = monitoring_resource_key + resource_value['sample_types'] = sample_types + + result.append((resource_key_with_uuid, resource_value)) + + else: + # Handle both real and mocked QKD formats + endpoint_value = interface.get('qkdi_att_point', {}) + if 'device' in endpoint_value and 'port' in endpoint_value: + # Real QKD data format + interface_uuid = f"{endpoint_value['device']}:{endpoint_value['port']}" + interface['uuid'] = interface_uuid + interface['name'] = interface_uuid + interface['enabled'] = True # Assume enabled for real data + else: + # Mocked QKD data format + interface_uuid = interface.get('uuid', f"/interface[{interface['qkdi_id']}]") + interface['uuid'] = interface_uuid + interface['name'] = interface.get('name', interface_uuid) + interface['enabled'] = interface.get('enabled', False) # Mocked enabled status + + result.append((f"/interface[{interface['qkdi_id']}]", interface)) + + except requests.RequestException as e: + LOGGER.error(f"Error fetching interfaces from {url}: {e}") + result.append((resource_key, e)) + + return result + +def 
fetch_links(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]: + """ + Fetches link data from the QKD node. Adapts to both mocked and real QKD data structures. + """ + result = [] + + if resource_key in [RESOURCE_LINKS, RESOURCE_NETWORK_INSTANCES]: + url += 'qkd_links/' + + try: + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + r.raise_for_status() + + # Handle real and mocked QKD data structures + links = r.json().get('qkd_links', []) + + for link in links: + # For real QKD format (QKD links returned as dictionary objects) + if isinstance(link, dict): + qkdl_id = link.get('qkdl_id') + link_type = link.get('qkdl_type', 'Direct') + + # Handle both real (PHYS, VIRT) and mocked (DIRECT) link types + if link_type == 'PHYS' or link_type == 'VIRT': + resource_key_direct = f"/link[{qkdl_id}]" + result.append((resource_key_direct, link)) + elif link_type == 'DIRECT': + # Mocked QKD format has a slightly different structure + result.append((f"/link/link[{qkdl_id}]", link)) + + # For mocked QKD format (QKD links returned as lists) + elif isinstance(link, list): + for l in link: + qkdl_id = l.get('uuid') + link_type = l.get('type', 'Direct') + + if link_type == 'DIRECT': + resource_key_direct = f"/link/link[{qkdl_id}]" + result.append((resource_key_direct, l)) + + except requests.RequestException as e: + LOGGER.error(f"Error fetching links from {url}: {e}") + result.append((resource_key, e)) + + return result + +def fetch_apps(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]: + """ + Fetches application data from the QKD node. + """ + result = [] + url += 'qkd_applications/' + + try: + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + r.raise_for_status() + + apps = r.json().get('qkd_applications', {}).get('qkd_app', []) + for app in apps: + result.append((f"/app[{app['app_id']}]", app)) + except requests.RequestException as e: + LOGGER.error(f"Error fetching applications from {url}: {e}") + result.append((resource_key, e)) + + return result + + +def fetch_capabilities(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]: + """ + Fetches capabilities data from the QKD node. + """ + result = [] + url += 'qkdn_capabilities/' + + try: + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + r.raise_for_status() + result.append((resource_key, r.json())) + except requests.RequestException as e: + LOGGER.error(f"Error fetching capabilities from {url}: {e}") + result.append((resource_key, e)) + + return result + + +def fetch_node(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]: + """ + Fetches node data from the QKD node. 
+ """ + result = [] + + try: + r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers) + r.raise_for_status() + result.append((resource_key, r.json().get('qkd_node', {}))) + except requests.RequestException as e: + LOGGER.error(f"Error fetching node from {url}: {e}") + result.append((resource_key, e)) + + return result + + +def create_connectivity_link( + root_url: str, link_uuid: str, node_id_src: str, interface_id_src: str, node_id_dst: str, interface_id_dst: str, + virt_prev_hop: Optional[str] = None, virt_next_hops: Optional[List[str]] = None, virt_bandwidth: Optional[int] = None, + auth: Optional[Any] = None, timeout: Optional[int] = None, headers: Dict[str, str] = {} +) -> Union[bool, Exception]: + """ + Creates a connectivity link between QKD nodes using the provided parameters. + """ + url = f"{root_url}/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_links/" + + qkd_link = { + 'qkdl_id': link_uuid, + 'qkdl_type': 'etsi-qkd-node-types:' + ('VIRT' if virt_prev_hop or virt_next_hops else 'PHYS'), + 'qkdl_local': {'qkdn_id': node_id_src, 'qkdi_id': interface_id_src}, + 'qkdl_remote': {'qkdn_id': node_id_dst, 'qkdi_id': interface_id_dst} + } + + if virt_prev_hop or virt_next_hops: + qkd_link['virt_prev_hop'] = virt_prev_hop + qkd_link['virt_next_hop'] = virt_next_hops or [] + qkd_link['virt_bandwidth'] = virt_bandwidth + + data = {'qkd_links': {'qkd_link': [qkd_link]}} + + LOGGER.info(f"Creating connectivity link with payload: {json.dumps(data)}") + + try: + r = requests.post(url, json=data, timeout=timeout, verify=False, auth=auth, headers=headers) + r.raise_for_status() + if r.status_code in HTTP_OK_CODES: + LOGGER.info(f"Link {link_uuid} created successfully.") + return True + else: + LOGGER.error(f"Failed to create link {link_uuid}, status code: {r.status_code}") + return False + except requests.exceptions.RequestException as e: + LOGGER.error(f"Exception creating link {link_uuid} with payload {json.dumps(data)}: {e}") + return e diff --git a/src/device/service/drivers/qkd/__init__.py b/src/device/service/drivers/qkd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e24e5523a216f79dcec21f2ac21b2262426acc04 --- /dev/null +++ b/src/device/service/drivers/qkd/__init__.py @@ -0,0 +1,27 @@ +from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES + +RESOURCE_LINKS = '__links__' +RESOURCE_APPS = '__apps__' +RESOURCE_CAPABILITES = '__capabilities__' +RESOURCE_NODE = '__node__' + + +ALL_RESOURCE_KEYS = [ + RESOURCE_ENDPOINTS, + RESOURCE_INTERFACES, + RESOURCE_NETWORK_INSTANCES, + RESOURCE_LINKS, + RESOURCE_APPS, + RESOURCE_CAPABILITES, + RESOURCE_NODE +] + +RESOURCE_KEY_MAPPINGS = { + RESOURCE_ENDPOINTS : 'component', + RESOURCE_INTERFACES : 'interface', + RESOURCE_NETWORK_INSTANCES: 'network_instance', + RESOURCE_LINKS : 'links', + RESOURCE_APPS : 'apps', + RESOURCE_CAPABILITES : 'capabilities', + RESOURCE_NODE : 'node' +} diff --git a/src/device/tests/qkd/integration/test_external_qkd_retrieve_information.py b/src/device/tests/qkd/integration/test_external_qkd_retrieve_information.py new file mode 100644 index 0000000000000000000000000000000000000000..0bb91191a96d0b3b6cfeef107b50a881c2261e60 --- /dev/null +++ b/src/device/tests/qkd/integration/test_external_qkd_retrieve_information.py @@ -0,0 +1,184 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import requests +import json +import os +from device.service.drivers.qkd.QKDDriver2 import QKDDriver +from device.service.drivers.qkd.Tools2 import ( + RESOURCE_INTERFACES, + RESOURCE_LINKS, + RESOURCE_CAPABILITES, + RESOURCE_NODE, + RESOURCE_APPS +) + +# Test ID: INT_LQ_Test_01 (QKD Node Authentication) +# Function to retrieve JWT token +def get_jwt_token(node_address, port, username, password): + """ Retrieve JWT token from a node's login endpoint if it's secured. """ + login_url = f"http://{node_address}:{port}/login" + payload = {'username': username, 'password': password} + try: + print(f"Attempting to retrieve JWT token from {login_url}...") + response = requests.post(login_url, headers={'Content-Type': 'application/x-www-form-urlencoded'}, data=payload) + response.raise_for_status() + print(f"Successfully retrieved JWT token from {login_url}") + return response.json().get('access_token') + except requests.exceptions.RequestException as e: + print(f"Failed to retrieve JWT token from {login_url}: {e}") + return None + + +# Environment variables for sensitive information +QKD1_ADDRESS = os.getenv("QKD1_ADDRESS") +QKD2_ADDRESS = os.getenv("QKD2_ADDRESS") +PORT = os.getenv("QKD_PORT") +USERNAME = os.getenv("QKD_USERNAME") +PASSWORD = os.getenv("QKD_PASSWORD") + +# Pytest fixture to initialize QKDDriver with token for Node 1 +@pytest.fixture +def driver_qkd1(): + token = get_jwt_token(QKD1_ADDRESS, PORT, USERNAME, PASSWORD) + headers = {'Authorization': f'Bearer {token}'} if token else {} + return QKDDriver(address=QKD1_ADDRESS, port=PORT, headers=headers) + +# Pytest fixture to initialize QKDDriver with token for Node 2 +@pytest.fixture +def driver_qkd2(): + token = get_jwt_token(QKD2_ADDRESS, PORT, USERNAME, PASSWORD) + headers = {'Authorization': f'Bearer {token}'} if token else {} + return QKDDriver(address=QKD2_ADDRESS, port=PORT, headers=headers) + +# Utility function to save data to a JSON file, filtering out non-serializable objects +def save_json_file(filename, data): + serializable_data = filter_serializable(data) + with open(filename, 'w') as f: + json.dump(serializable_data, f, indent=2) + print(f"Saved data to {filename}") + +# Function to filter out non-serializable objects like HTTPError +def filter_serializable(data): + if isinstance(data, list): + return [filter_serializable(item) for item in data if not isinstance(item, requests.exceptions.RequestException)] + elif isinstance(data, dict): + return {key: filter_serializable(value) for key, value in data.items() if not isinstance(value, requests.exceptions.RequestException)} + return data + +# Utility function to print the retrieved data for debugging, handling errors +def print_data(label, data): + try: + print(f"{label}: {json.dumps(data, indent=2)}") + except TypeError as e: + print(f"Error printing {label}: {e}, Data: {data}") + +# General function to retrieve and handle HTTP errors +def retrieve_data(driver_qkd, resource, resource_name): + try: + data = driver_qkd.GetConfig([resource]) + assert isinstance(data, list), f"Expected a list for 
{resource_name}" + assert len(data) > 0, f"No {resource_name} found in the system" + return data + except requests.exceptions.HTTPError as e: + print(f"HTTPError while fetching {resource_name}: {e}") + return None + except AssertionError as e: + print(f"AssertionError: {e}") + return None + +# Test ID: INT_LQ_Test_02 (QKD Node Capabilities) +def retrieve_capabilities(driver_qkd, node_name): + capabilities = retrieve_data(driver_qkd, RESOURCE_CAPABILITES, "capabilities") + if capabilities: + print_data(f"{node_name} Capabilities", capabilities) + return capabilities + +# Test ID: INT_LQ_Test_03 (QKD Interfaces) +def retrieve_interfaces(driver_qkd, node_name): + interfaces = retrieve_data(driver_qkd, RESOURCE_INTERFACES, "interfaces") + if interfaces: + print_data(f"{node_name} Interfaces", interfaces) + return interfaces + +# Test ID: INT_LQ_Test_04 (QKD Links) +def retrieve_links(driver_qkd, node_name): + links = retrieve_data(driver_qkd, RESOURCE_LINKS, "links") + if links: + print_data(f"{node_name} Links", links) + return links + +# Test ID: INT_LQ_Test_05 (QKD Link Metrics) +def retrieve_link_metrics(driver_qkd, node_name): + links = retrieve_links(driver_qkd, node_name) + if links: + for link in links: + if 'performance_metrics' in link[1]: + print_data(f"{node_name} Link Metrics", link[1]['performance_metrics']) + else: + print(f"No metrics found for link {link[0]}") + return links + +# Test ID: INT_LQ_Test_06 (QKD Applications) +def retrieve_applications(driver_qkd, node_name): + applications = retrieve_data(driver_qkd, RESOURCE_APPS, "applications") + if applications: + print_data(f"{node_name} Applications", applications) + return applications + +# Test ID: INT_LQ_Test_07 (System Health Check) +def retrieve_node_data(driver_qkd, node_name): + node_data = retrieve_data(driver_qkd, RESOURCE_NODE, "node data") + if node_data: + print_data(f"{node_name} Node Data", node_data) + return node_data + +# Main test to retrieve and save data from QKD1 and QKD2 to files +def test_retrieve_and_save_data(driver_qkd1, driver_qkd2): + # Retrieve data for QKD1 + qkd1_interfaces = retrieve_interfaces(driver_qkd1, "QKD1") + qkd1_links = retrieve_links(driver_qkd1, "QKD1") + qkd1_capabilities = retrieve_capabilities(driver_qkd1, "QKD1") + qkd1_node_data = retrieve_node_data(driver_qkd1, "QKD1") + qkd1_apps = retrieve_applications(driver_qkd1, "QKD1") + + qkd1_data = { + "interfaces": qkd1_interfaces, + "links": qkd1_links, + "capabilities": qkd1_capabilities, + "apps": qkd1_apps, + "node_data": qkd1_node_data + } + + # Save QKD1 data to file + save_json_file('qkd1_data.json', qkd1_data) + + # Retrieve data for QKD2 + qkd2_interfaces = retrieve_interfaces(driver_qkd2, "QKD2") + qkd2_links = retrieve_links(driver_qkd2, "QKD2") + qkd2_capabilities = retrieve_capabilities(driver_qkd2, "QKD2") + qkd2_node_data = retrieve_node_data(driver_qkd2, "QKD2") + qkd2_apps = retrieve_applications(driver_qkd2, "QKD2") + + qkd2_data = { + "interfaces": qkd2_interfaces, + "links": qkd2_links, + "capabilities": qkd2_capabilities, + "apps": qkd2_apps, + "node_data": qkd2_node_data + } + + # Save QKD2 data to file + save_json_file('qkd2_data.json', qkd2_data) diff --git a/src/device/tests/qkd/unit/PrepareScenario.py b/src/device/tests/qkd/unit/PrepareScenario.py new file mode 100644 index 0000000000000000000000000000000000000000..756b914d55df788472ed6e839e1fc29e356877e4 --- /dev/null +++ b/src/device/tests/qkd/unit/PrepareScenario.py @@ -0,0 +1,126 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) 
(https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest, os, time, logging +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_HTTP, + get_env_var_name, get_service_port_http +) +from context.client.ContextClient import ContextClient +from nbi.service.rest_server.RestServer import RestServer +from nbi.service.rest_server.nbi_plugins.tfs_api import register_tfs_api +from device.client.DeviceClient import DeviceClient +from device.service.DeviceService import DeviceService +from device.service.driver_api.DriverFactory import DriverFactory +from device.service.driver_api.DriverInstanceCache import DriverInstanceCache +from device.service.drivers import DRIVERS +from device.tests.CommonObjects import CONTEXT, TOPOLOGY +from device.tests.MockService_Dependencies import MockService_Dependencies +from monitoring.client.MonitoringClient import MonitoringClient +from requests import codes as requests_codes +import requests + +# Constants +LOCAL_HOST = '127.0.0.1' +MOCKSERVICE_PORT = 8080 + +# Get dynamic port for NBI service +NBI_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_http(ServiceNameEnum.NBI) + +# Set environment variables for the NBI service host and port +os.environ[get_env_var_name(ServiceNameEnum.NBI, ENVVAR_SUFIX_SERVICE_HOST)] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.NBI, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(NBI_SERVICE_PORT) + +# Expected status codes for requests +EXPECTED_STATUS_CODES = {requests_codes['OK'], requests_codes['CREATED'], requests_codes['ACCEPTED'], requests_codes['NO_CONTENT']} + +# Debugging output for the port number +print(f"MOCKSERVICE_PORT: {MOCKSERVICE_PORT}") +print(f"NBI_SERVICE_PORT: {NBI_SERVICE_PORT}") + +@pytest.fixture(scope='session') +def mock_service(): + _service = MockService_Dependencies(MOCKSERVICE_PORT) + _service.configure_env_vars() + _service.start() + yield _service + _service.stop() + +@pytest.fixture(scope='session') +def nbi_service_rest(mock_service): # Pass the `mock_service` as an argument if needed + _rest_server = RestServer() + register_tfs_api(_rest_server) # Register the TFS API with the REST server + _rest_server.start() + time.sleep(1) # Give time for the server to start + yield _rest_server + _rest_server.shutdown() + _rest_server.join() + +@pytest.fixture(scope='session') +def context_client(mock_service): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_service(context_client, monitoring_client): + _driver_factory = DriverFactory(DRIVERS) + _driver_instance_cache = DriverInstanceCache(_driver_factory) + _service = DeviceService(_driver_instance_cache) + _service.start() + yield _service + _service.stop() + +@pytest.fixture(scope='session') +def device_client(device_service): + _client = DeviceClient() + yield _client + _client.close() + +# General request function +def do_rest_request(method, url, body=None, timeout=10, 
allow_redirects=True, logger=None): + # Construct the request URL with NBI service port + request_url = f"http://{LOCAL_HOST}:{NBI_SERVICE_PORT}{url}" + + # Log the request details for debugging + if logger: + msg = f"Request: {method.upper()} {request_url}" + if body: + msg += f" body={body}" + logger.warning(msg) + + # Send the request + reply = requests.request(method, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects) + + # Log the response details for debugging + if logger: + logger.warning(f"Reply: {reply.text}") + + # Print status code and response for debugging instead of asserting + print(f"Status code: {reply.status_code}") + print(f"Response: {reply.text}") + + # Return the JSON response if present + if reply.content: + return reply.json() + return None + +# Function for GET requests +def do_rest_get_request(url, body=None, timeout=10, allow_redirects=True, logger=None): + return do_rest_request('get', url, body, timeout, allow_redirects, logger=logger) + +# Function for POST requests +def do_rest_post_request(url, body=None, timeout=10, allow_redirects=True, logger=None): + return do_rest_request('post', url, body, timeout, allow_redirects, logger=logger) diff --git a/src/device/tests/qkd/unit/retrieve_device_mock_information.py b/src/device/tests/qkd/unit/retrieve_device_mock_information.py new file mode 100644 index 0000000000000000000000000000000000000000..f6e18f51d4ce788cbc40dc6befe101df65ef765f --- /dev/null +++ b/src/device/tests/qkd/unit/retrieve_device_mock_information.py @@ -0,0 +1,104 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
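A note on PrepareScenario.py above: it defines EXPECTED_STATUS_CODES but do_rest_request only prints the reply status instead of checking it. The sketch below is a hedged illustration of a stricter variant; it assumes the same LOCAL_HOST, NBI_SERVICE_PORT and EXPECTED_STATUS_CODES module globals, and the name do_rest_request_checked is hypothetical, not part of the patch.

import requests

def do_rest_request_checked(method, url, body=None, timeout=10, allow_redirects=True, logger=None):
    # Same request construction as do_rest_request, assuming the LOCAL_HOST and NBI_SERVICE_PORT globals
    request_url = f"http://{LOCAL_HOST}:{NBI_SERVICE_PORT}{url}"
    reply = requests.request(method, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects)
    if logger is not None:
        logger.warning(f"Reply: {reply.status_code} {reply.text}")
    # Fail fast on unexpected status codes instead of only printing them
    assert reply.status_code in EXPECTED_STATUS_CODES, f"Unexpected status {reply.status_code}: {reply.text}"
    return reply.json() if reply.content else None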
+ +import logging, urllib +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader +from context.client.ContextClient import ContextClient +from nbi.service.rest_server.RestServer import RestServer +from common.tools.object_factory.Context import json_context_id +from device.tests.qkd.unit.PrepareScenario import mock_service, nbi_service_rest, do_rest_get_request + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID) + + +# ----- Context -------------------------------------------------------------------------------------------------------- + +def test_rest_get_context_ids(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + reply = do_rest_get_request('/tfs-api/context_ids') + print("Context IDs:", reply) + +def test_rest_get_contexts(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + reply = do_rest_get_request('/tfs-api/contexts') + print("Contexts:", reply) + +def test_rest_get_context(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) + reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}') + print("Context data:", reply) + + +# ----- Topology ------------------------------------------------------------------------------------------------------- + +def test_rest_get_topology_ids(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) + reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/topology_ids') + print("Topology IDs:", reply) + +def test_rest_get_topologies(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) + reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/topologies') + print("Topologies:", reply) + +def test_rest_get_topology(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) + topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_NAME) + reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/topology/{topology_uuid}') + print("Topology data:", reply) + + +# ----- Device --------------------------------------------------------------------------------------------------------- + +def test_rest_get_device_ids(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + reply = do_rest_get_request('/tfs-api/device_ids') + print("Device IDs:", reply) + +def test_rest_get_devices(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + reply = do_rest_get_request('/tfs-api/devices') + print("Devices:", reply) + + +# ----- Link ----------------------------------------------------------------------------------------------------------- + +def test_rest_get_link_ids(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + reply = do_rest_get_request('/tfs-api/link_ids') + print("Link IDs:", reply) + +def test_rest_get_links(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + reply = do_rest_get_request('/tfs-api/links') + 
print("Links:", reply) + + +# ----- Service -------------------------------------------------------------------------------------------------------- + +def test_rest_get_service_ids(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + reply = do_rest_get_request('/tfs-api/link_ids') + print("Service IDs:", reply) + +def test_rest_get_topologies(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) + reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/services') + print("Services:", reply) + +# ----- Apps ----------------------------------------------------------------------------------------------------------- + +def test_rest_get_apps(nbi_service_rest: RestServer): # pylint: disable=redefined-outer-name, unused-argument + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) # Context ID + reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/apps') + print("Apps:", reply) diff --git a/src/device/tests/qkd/unit/test_application_deployment.py b/src/device/tests/qkd/unit/test_application_deployment.py new file mode 100644 index 0000000000000000000000000000000000000000..92e16663b41556563aab884be2ee48518cd15ff7 --- /dev/null +++ b/src/device/tests/qkd/unit/test_application_deployment.py @@ -0,0 +1,47 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import json +from device.service.drivers.qkd.QKDDriver2 import QKDDriver + +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 + +@pytest.fixture +def qkd_driver(): + # Initialize the QKD driver with the appropriate settings + return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass') + +def test_application_deployment(qkd_driver): + qkd_driver.Connect() + + # Application registration data + app_data = { + 'qkd_app': [ + { + 'app_id': '00000001-0001-0000-0000-000000000001', + 'client_app_id': [], + 'app_statistics': {'statistics': []}, + 'app_qos': {}, + 'backing_qkdl_id': [] + } + ] + } + + # Send a POST request to create the application + response = qkd_driver.SetConfig([('/qkd_applications/qkd_app', json.dumps(app_data))]) + + # Verify response + assert response[0] is True, "Expected application registration to succeed" diff --git a/src/device/tests/qkd/unit/test_mock_qkd_node.py b/src/device/tests/qkd/unit/test_mock_qkd_node.py new file mode 100644 index 0000000000000000000000000000000000000000..f679a8389476c35a6881b7dc6484baab8ef4e20b --- /dev/null +++ b/src/device/tests/qkd/unit/test_mock_qkd_node.py @@ -0,0 +1,31 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import requests +from requests.exceptions import ConnectionError + +def test_mock_qkd_node_responses(): + response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node') + assert response.status_code == 200 + data = response.json() + assert 'qkd_node' in data + +def test_mock_node_failure_scenarios(): + try: + response = requests.get('http://127.0.0.1:12345/restconf/data/etsi-qkd-sdn-node:qkd_node') + except ConnectionError as e: + assert isinstance(e, ConnectionError) + else: + pytest.fail("ConnectionError not raised as expected") diff --git a/src/device/tests/qkd/unit/test_qkd_compliance.py b/src/device/tests/qkd/unit/test_qkd_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..2f305888e952e7d9acc3e96ffc1e427a7cc85685 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_compliance.py @@ -0,0 +1,24 @@ + +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import requests +from tests.tools.mock_qkd_nodes.YangValidator import YangValidator + +def test_compliance_with_yang_models(): + validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types']) + response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node') + data = response.json() + assert validator.parse_to_dict(data) is not None diff --git a/src/device/tests/qkd/unit/test_qkd_configuration.py b/src/device/tests/qkd/unit/test_qkd_configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..15c4787c28a92ed07d8666b4c715954da1e690d6 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_configuration.py @@ -0,0 +1,230 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
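A side note on test_mock_qkd_node.py above: the try/except/else pattern in test_mock_node_failure_scenarios works, but pytest.raises states the intent more directly. A minimal sketch under the same assumption that nothing listens on 127.0.0.1:12345; the test name with the _alt suffix is only illustrative.

import pytest
import requests
from requests.exceptions import ConnectionError

def test_mock_node_failure_scenarios_alt():
    # The request to an unused local port must fail to connect
    with pytest.raises(ConnectionError):
        requests.get('http://127.0.0.1:12345/restconf/data/etsi-qkd-sdn-node:qkd_node')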
+
+import re  # needed for the UUID pattern check on virtual links below
+import pytest
+import json
+from requests.exceptions import HTTPError
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+import requests
+from device.service.drivers.qkd.Tools2 import (
+    RESOURCE_INTERFACES,
+    RESOURCE_LINKS,
+    RESOURCE_ENDPOINTS,
+    RESOURCE_APPS,
+    RESOURCE_CAPABILITES,
+    RESOURCE_NODE
+)
+
+MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+@pytest.fixture
+def qkd_driver():
+    # Initialize the QKD driver with the appropriate settings and ensure correct JWT headers are included
+    token = "YOUR_JWT_TOKEN"  # Replace with your actual JWT token
+    if not token:
+        pytest.fail("JWT token is missing. Make sure to generate a valid JWT token.")
+    headers = {"Authorization": f"Bearer {token}"}
+    return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, headers=headers)
+
+# Utility function to print the retrieved data for debugging
+def print_data(label, data):
+    print(f"{label}: {json.dumps(data, indent=2)}")
+
+# Test ID: SBI_Test_03 (Initial Config Retrieval)
+def test_initial_config_retrieval(qkd_driver):
+    qkd_driver.Connect()
+
+    # Retrieve and validate the initial configuration
+    config = qkd_driver.GetInitialConfig()
+
+    # Since GetInitialConfig returns a list, adjust the assertions accordingly
+    assert isinstance(config, list), "Expected a list for initial config"
+    assert len(config) > 0, "Initial config should not be empty"
+
+    # Output for debugging
+    print_data("Initial Config", config)
+
+# Test ID: INT_LQ_Test_05 (QKD Devices Retrieval)
+def test_retrieve_devices(qkd_driver):
+    qkd_driver.Connect()
+
+    # Retrieve and validate device information
+    devices = qkd_driver.GetConfig([RESOURCE_NODE])
+    assert isinstance(devices, list), "Expected a list of devices"
+
+    if not devices:
+        pytest.skip("No devices found in the system. Skipping device test.")
+
+    for device in devices:
+        assert isinstance(device, tuple), "Each device entry must be a tuple"
+        assert isinstance(device[1], dict), "Device data must be a dictionary"
+        if isinstance(device[1], Exception):
+            pytest.fail(f"Error retrieving devices: {device[1]}")
+
+    # Output for debugging
+    print_data("Devices", devices)
+
+# Test ID: INT_LQ_Test_04 (QKD Links Retrieval)
+def test_retrieve_links(qkd_driver):
+    qkd_driver.Connect()
+
+    try:
+        # Fetch the links using the correct resource key
+        links = qkd_driver.GetConfig([RESOURCE_LINKS])
+        assert isinstance(links, list), "Expected a list of tuples (resource key, data)."
+ + if len(links) == 0: + pytest.skip("No links found in the system, skipping link validation.") + + for link in links: + assert isinstance(link, tuple), "Each link entry must be a tuple" + resource_key, link_data = link # Unpack the tuple + + # Handle HTTPError or exception in the response + if isinstance(link_data, requests.exceptions.HTTPError): + pytest.fail(f"Failed to retrieve links due to HTTP error: {link_data}") + + if isinstance(link_data, dict): + # For real QKD data (links as dictionaries) + assert 'qkdl_id' in link_data, "Missing 'qkdl_id' in link data" + assert 'qkdl_local' in link_data, "Missing 'qkdl_local' in link data" + assert 'qkdl_remote' in link_data, "Missing 'qkdl_remote' in link data" + assert 'qkdl_type' in link_data, "Missing 'qkdl_type' in link data" + + # Check 'virt_prev_hop' only for virtual links (VIRT) + if link_data['qkdl_type'] == 'etsi-qkd-node-types:VIRT': + virt_prev_hop = link_data.get('virt_prev_hop') + assert virt_prev_hop is None or re.match(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', str(virt_prev_hop)), \ + f"Invalid 'virt_prev_hop': {virt_prev_hop}" + + # Print out the link details for debugging + print(f"Link ID: {link_data['qkdl_id']}") + print(f"Link Type: {link_data['qkdl_type']}") + print(f"Local QKD: {json.dumps(link_data['qkdl_local'], indent=2)}") + print(f"Remote QKD: {json.dumps(link_data['qkdl_remote'], indent=2)}") + + elif isinstance(link_data, list): + # For mocked QKD data (links as lists of dictionaries) + for mock_link in link_data: + assert 'uuid' in mock_link, "Missing 'uuid' in mocked link data" + assert 'src_qkdn_id' in mock_link, "Missing 'src_qkdn_id' in mocked link data" + assert 'dst_qkdn_id' in mock_link, "Missing 'dst_qkdn_id' in mocked link data" + + # Print out the mocked link details for debugging + print(f"Mock Link ID: {mock_link['uuid']}") + print(f"Source QKD ID: {mock_link['src_qkdn_id']}") + print(f"Destination QKD ID: {mock_link['dst_qkdn_id']}") + + else: + pytest.fail(f"Unexpected link data format: {type(link_data)}") + + except HTTPError as e: + pytest.fail(f"HTTP error occurred while retrieving links: {e}") + except Exception as e: + pytest.fail(f"An unexpected error occurred: {e}") + +# Test for QKD Services +def test_retrieve_services(qkd_driver): + qkd_driver.Connect() + services = qkd_driver.GetConfig([RESOURCE_ENDPOINTS]) + assert isinstance(services, list), "Expected a list of services" + + if not services: + pytest.skip("No services found in the system. Skipping service test.") + + for service in services: + assert isinstance(service, tuple), "Each service entry must be a tuple" + assert isinstance(service[1], dict), "Service data must be a dictionary" + if isinstance(service[1], Exception): + pytest.fail(f"Error retrieving services: {service[1]}") + + print("Services:", json.dumps(services, indent=2)) + +# Test ID: INT_LQ_Test_07 (QKD Applications Retrieval) +def test_retrieve_applications(qkd_driver): + qkd_driver.Connect() + + # Retrieve and validate applications information + applications = qkd_driver.GetConfig([RESOURCE_APPS]) # Adjust to fetch applications using the correct key + assert isinstance(applications, list), "Expected a list of applications" + + if not applications: + pytest.skip("No applications found in the system. 
Skipping applications test.") + + for app in applications: + assert isinstance(app, tuple), "Each application entry must be a tuple" + assert isinstance(app[1], dict), "Application data must be a dictionary" + if isinstance(app[1], Exception): + pytest.fail(f"Error retrieving applications: {app[1]}") + + # Output for debugging + print_data("Applications", applications) + +# Test ID: INT_LQ_Test_03 (QKD Interfaces Retrieval) +def test_retrieve_interfaces(qkd_driver): + qkd_driver.Connect() + + # Retrieve and validate interface information + interfaces = qkd_driver.GetConfig([RESOURCE_INTERFACES]) + + assert isinstance(interfaces, list), "Expected a list of interfaces" + assert len(interfaces) > 0, "No interfaces found in the system" + + for interface in interfaces: + assert isinstance(interface, tuple), "Each interface entry must be a tuple" + assert isinstance(interface[1], dict), "Interface data must be a dictionary" + if isinstance(interface[1], Exception): + pytest.fail(f"Error retrieving interfaces: {interface[1]}") + + # Output for debugging + print_data("Interfaces", interfaces) + +# Test ID: INT_LQ_Test_02 (QKD Capabilities Retrieval) +def test_retrieve_capabilities(qkd_driver): + qkd_driver.Connect() + + # Retrieve and validate capabilities information + capabilities = qkd_driver.GetConfig([RESOURCE_CAPABILITES]) + + assert isinstance(capabilities, list), "Expected a list of capabilities" + assert len(capabilities) > 0, "No capabilities found in the system" + + for capability in capabilities: + assert isinstance(capability, tuple), "Each capability entry must be a tuple" + assert isinstance(capability[1], dict), "Capability data must be a dictionary" + if isinstance(capability[1], Exception): + pytest.fail(f"Error retrieving capabilities: {capability[1]}") + + # Output for debugging + print_data("Capabilities", capabilities) + +# Test ID: INT_LQ_Test_03 (QKD Endpoints Retrieval) +def test_retrieve_endpoints(qkd_driver): + qkd_driver.Connect() + + # Retrieve and validate endpoint information + endpoints = qkd_driver.GetConfig([RESOURCE_ENDPOINTS]) + + assert isinstance(endpoints, list), "Expected a list of endpoints" + assert len(endpoints) > 0, "No endpoints found in the system" + + for endpoint in endpoints: + assert isinstance(endpoint, tuple), "Each endpoint entry must be a tuple" + assert isinstance(endpoint[1], dict), "Endpoint data must be a dictionary" + if isinstance(endpoint[1], Exception): + pytest.fail(f"Error retrieving endpoints: {endpoint[1]}") + + # Output for debugging + print_data("Endpoints", endpoints) diff --git a/src/device/tests/qkd/unit/test_qkd_error_hanling.py b/src/device/tests/qkd/unit/test_qkd_error_hanling.py new file mode 100644 index 0000000000000000000000000000000000000000..d93e3711136de496fd39365563032f827cfbe913 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_error_hanling.py @@ -0,0 +1,93 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
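The applications, interfaces, capabilities and endpoints tests in test_qkd_configuration.py above all repeat the same validation of the (resource_key, data) tuples returned by GetConfig. A hedged sketch of how they could be folded into a single parametrized test, assuming the qkd_driver fixture and the RESOURCE_* constants imported in that file; the test name is illustrative only.

import pytest

@pytest.mark.parametrize('resource_key', [
    RESOURCE_APPS, RESOURCE_INTERFACES, RESOURCE_CAPABILITES, RESOURCE_ENDPOINTS
])
def test_retrieve_resource(qkd_driver, resource_key):
    qkd_driver.Connect()
    entries = qkd_driver.GetConfig([resource_key])
    assert isinstance(entries, list), f"Expected a list for {resource_key}"
    if not entries:
        pytest.skip(f"No entries found for {resource_key}; skipping.")
    for key, value in entries:
        # Each entry is a (resource_key, data) tuple; data must be a dict, not an Exception
        assert not isinstance(value, Exception), f"Error retrieving {resource_key}: {value}"
        assert isinstance(value, dict), f"{resource_key} data must be a dictionary"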
+ +import pytest, requests +from requests.exceptions import ConnectionError, HTTPError, Timeout +from device.service.drivers.qkd.QKDDriver2 import QKDDriver + +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 + +@pytest.fixture +def qkd_driver(): + # Initialize the QKD driver for testing + return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass') + +def test_invalid_operations_on_network_links(qkd_driver): + """ + Test Case ID: SBI_Test_09 - Perform invalid operations and validate error handling. + Objective: Perform invalid operations on network links and ensure proper error handling and logging. + """ + qkd_driver.Connect() + + # Step 1: Perform invalid operation with an incorrect resource key + invalid_payload = { + "invalid_resource_key": { + "invalid_field": "invalid_value" + } + } + + try: + # Attempt to perform an invalid operation (simulate wrong resource key) + response = requests.post(f'http://{qkd_driver.address}/invalid_resource', json=invalid_payload) + response.raise_for_status() + + except HTTPError as e: + # Step 2: Validate proper error handling and user-friendly messages + print(f"Handled HTTPError: {e}") + assert e.response.status_code in [400, 404], "Expected 400 Bad Request or 404 Not Found for invalid operation." + if e.response.status_code == 404: + assert "Not Found" in e.response.text, "Expected user-friendly 'Not Found' message." + elif e.response.status_code == 400: + assert "Invalid resource key" in e.response.text, "Expected user-friendly 'Bad Request' message." + + except Exception as e: + # Log unexpected exceptions + pytest.fail(f"Unexpected error occurred: {e}") + + finally: + qkd_driver.Disconnect() + +def test_network_failure_simulation(qkd_driver): + """ + Test Case ID: SBI_Test_10 - Simulate network failures and validate resilience and recovery. + Objective: Simulate network failures (e.g., QKD node downtime) and validate system's resilience. + """ + qkd_driver.Connect() + + try: + # Step 1: Simulate network failure (disconnect QKD node, or use unreachable address/port) + qkd_driver_with_failure = QKDDriver(address='127.0.0.1', port=12345, username='user', password='pass') # Valid but incorrect port + + # Try to connect and retrieve state, expecting a failure + response = qkd_driver_with_failure.GetState() + + # Step 2: Validate resilience and recovery mechanisms + # Check if the response is empty, indicating a failure to retrieve state + if not response: + print("Network failure simulated successfully and handled.") + else: + pytest.fail("Expected network failure but received a valid response.") + + except HTTPError as e: + # Log HTTP errors as part of error handling + print(f"Handled network failure error: {e}") + + except Exception as e: + # Step 3: Log unexpected exceptions + print(f"Network failure encountered: {e}") + + finally: + # Step 4: Ensure driver disconnects properly + qkd_driver.Disconnect() diff --git a/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..150d00fd079b0a036f383653c833562279bb4d72 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py @@ -0,0 +1,40 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest, requests +from unittest.mock import patch +from device.service.drivers.qkd.QKDDriver import QKDDriver + +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 + +@pytest.fixture +def qkd_driver(): + return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass') + +# Deliverable Test ID: SBI_Test_01 +def test_qkd_driver_connection(qkd_driver): + assert qkd_driver.Connect() is True + +# Deliverable Test ID: SBI_Test_01 +def test_qkd_driver_invalid_connection(): + qkd_driver = QKDDriver(address='127.0.0.1', port=12345, username='user', password='pass') # Use invalid port directly + assert qkd_driver.Connect() is False + +# Deliverable Test ID: SBI_Test_10 +@patch('device.service.drivers.qkd.QKDDriver2.requests.get') +def test_qkd_driver_timeout_connection(mock_get, qkd_driver): + mock_get.side_effect = requests.exceptions.Timeout + qkd_driver.timeout = 0.001 # Simulate very short timeout + assert qkd_driver.Connect() is False diff --git a/src/device/tests/qkd/unit/test_qkd_performance.py b/src/device/tests/qkd/unit/test_qkd_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..b15d1ab070b61333c55f7674e39ba74aae891de6 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_performance.py @@ -0,0 +1,32 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# tests/unit/test_qkd_performance.py + +import pytest, time +from device.service.drivers.qkd.QKDDriver2 import QKDDriver + +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 + +def test_performance_under_load(): + driver = QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass') + driver.Connect() + + start_time = time.time() + for _ in range(1000): + driver.GetConfig(['/qkd_interfaces/qkd_interface']) + end_time = time.time() + + assert (end_time - start_time) < 60 diff --git a/src/device/tests/qkd/unit/test_qkd_security.py b/src/device/tests/qkd/unit/test_qkd_security.py new file mode 100644 index 0000000000000000000000000000000000000000..f2942fd4685dce13f89832528d4298b267707886 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_security.py @@ -0,0 +1,88 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import pytest +import requests +from requests.exceptions import HTTPError +from device.service.drivers.qkd.QKDDriver2 import QKDDriver +from device.service.drivers.qkd.Tools2 import RESOURCE_CAPABILITES + +# Helper function to print data in a formatted JSON style for debugging +def print_data(label, data): + print(f"{label}: {json.dumps(data, indent=2)}") + +# Environment variables for sensitive information +QKD1_ADDRESS = os.getenv("QKD1_ADDRESS") +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 +PORT = os.getenv("QKD_PORT") +USERNAME = os.getenv("QKD_USERNAME") +PASSWORD = os.getenv("QKD_PASSWORD") + + +# Utility function to retrieve JWT token +def get_jwt_token(address, port, username, password): + url = f"http://{address}:{port}/login" + headers = {"Content-Type": "application/x-www-form-urlencoded"} + payload = f"username={username}&password={password}" + + try: + response = requests.post(url, data=payload, headers=headers) + response.raise_for_status() + return response.json().get('access_token') + except requests.exceptions.RequestException as e: + print(f"Failed to retrieve JWT token: {e}") + return None + +# Real QKD Driver (Requires JWT token) +@pytest.fixture +def real_qkd_driver(): + token = get_jwt_token(QKD1_ADDRESS, PORT, USERNAME, PASSWORD) # Replace with actual details + if not token: + pytest.fail("Failed to retrieve JWT token.") + headers = {'Authorization': f'Bearer {token}'} + return QKDDriver(address=QKD1_ADDRESS, port=PORT, headers=headers) + +# Mock QKD Driver (No actual connection, mock capabilities) +@pytest.fixture +def mock_qkd_driver(): + # Initialize the mock QKD driver with mock settings + token = "mock_token" + headers = {"Authorization": f"Bearer {token}"} + return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, headers=headers) + +# General function to retrieve and test capabilities +def retrieve_capabilities(qkd_driver, driver_name): + try: + qkd_driver.Connect() + capabilities = qkd_driver.GetConfig([RESOURCE_CAPABILITES]) + assert isinstance(capabilities, list), "Expected a list of capabilities" + assert len(capabilities) > 0, f"No capabilities found for {driver_name}" + print_data(f"{driver_name} Capabilities", capabilities) + except HTTPError as e: + pytest.fail(f"HTTPError while fetching capabilities for {driver_name}: {e}") + except AssertionError as e: + pytest.fail(f"AssertionError: {e}") + except Exception as e: + pytest.fail(f"An unexpected error occurred: {e}") + +# Test for Real QKD Capabilities +def test_real_qkd_capabilities(real_qkd_driver): + retrieve_capabilities(real_qkd_driver, "Real QKD") + +# Test for Mock QKD Capabilities +def test_mock_qkd_capabilities(mock_qkd_driver): + retrieve_capabilities(mock_qkd_driver, "Mock QKD") diff --git a/src/device/tests/qkd/unit/test_qkd_subscription.py b/src/device/tests/qkd/unit/test_qkd_subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..883fe2a1a6defe85a995c12e824a5e7d88159981 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_subscription.py @@ -0,0 +1,53 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) 
(https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from typing import List, Tuple +from device.service.drivers.qkd.QKDDriver2 import QKDDriver + +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 + + +@pytest.fixture +def qkd_driver(): + # Initialize the QKD driver + return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass') + + +def test_state_subscription(qkd_driver): + """ + Test Case ID: SBI_Test_06 - Subscribe to state changes and validate the subscription process. + """ + qkd_driver.Connect() + + try: + # Step 1: Define the subscription + subscriptions = [ + ('00000001-0000-0000-0000-000000000000', 60, 10) # (node_id, frequency, timeout) + ] + + # Step 2: Subscribe to state changes using the driver method + subscription_results = qkd_driver.SubscribeState(subscriptions) + + # Step 3: Validate that the subscription was successful + assert all(result is True for result in subscription_results), "Subscription to state changes failed." + + print("State subscription successful:", subscription_results) + + except Exception as e: + pytest.fail(f"An unexpected error occurred during state subscription: {e}") + + finally: + qkd_driver.Disconnect() diff --git a/src/device/tests/qkd/unit/test_qkd_unsubscription.py b/src/device/tests/qkd/unit/test_qkd_unsubscription.py new file mode 100644 index 0000000000000000000000000000000000000000..883fe2a1a6defe85a995c12e824a5e7d88159981 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_unsubscription.py @@ -0,0 +1,53 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from typing import List, Tuple +from device.service.drivers.qkd.QKDDriver2 import QKDDriver + +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 + + +@pytest.fixture +def qkd_driver(): + # Initialize the QKD driver + return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass') + + +def test_state_subscription(qkd_driver): + """ + Test Case ID: SBI_Test_06 - Subscribe to state changes and validate the subscription process. 
+ """ + qkd_driver.Connect() + + try: + # Step 1: Define the subscription + subscriptions = [ + ('00000001-0000-0000-0000-000000000000', 60, 10) # (node_id, frequency, timeout) + ] + + # Step 2: Subscribe to state changes using the driver method + subscription_results = qkd_driver.SubscribeState(subscriptions) + + # Step 3: Validate that the subscription was successful + assert all(result is True for result in subscription_results), "Subscription to state changes failed." + + print("State subscription successful:", subscription_results) + + except Exception as e: + pytest.fail(f"An unexpected error occurred during state subscription: {e}") + + finally: + qkd_driver.Disconnect() diff --git a/src/device/tests/qkd/unit/test_set_new_configuration.py b/src/device/tests/qkd/unit/test_set_new_configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..438e46d74f0ad1204f496aaf99e29e21f41c5805 --- /dev/null +++ b/src/device/tests/qkd/unit/test_set_new_configuration.py @@ -0,0 +1,126 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest, requests, uuid +from requests.exceptions import HTTPError +from device.service.drivers.qkd.QKDDriver2 import QKDDriver +from device.service.drivers.qkd.Tools2 import RESOURCE_APPS + +MOCK_QKD1_ADDRRESS = '127.0.0.1' +MOCK_PORT1 = 11111 +MOCK_QKD3_ADDRRESS = '127.0.0.1' +MOCK_PORT3 = 33333 + +@pytest.fixture +def qkd_driver1(): + # Initialize the QKD driver for QKD1 + return QKDDriver(address=MOCK_QKD1_ADDRRESS, port=MOCK_PORT1, username='user', password='pass') + +@pytest.fixture +def qkd_driver3(): + # Initialize the QKD driver for QKD3 + return QKDDriver(address=MOCK_QKD3_ADDRRESS, port=MOCK_PORT3, username='user', password='pass') + +def create_qkd_app(driver, qkdn_id, backing_qkdl_id, client_app_id=None): + """ + Helper function to create QKD applications on the given driver. + """ + server_app_id = str(uuid.uuid4()) # Generate a unique server_app_id + + app_payload = { + 'app': { + 'server_app_id': server_app_id, + 'client_app_id': client_app_id if client_app_id else [], # Add client_app_id if provided + 'app_status': 'ON', + 'local_qkdn_id': qkdn_id, + 'backing_qkdl_id': backing_qkdl_id + } + } + + try: + # Log the payload being sent + print(f"Sending payload to {driver.address}: {app_payload}") + + # Send POST request to create the application + response = requests.post(f'http://{driver.address}/app/create_qkd_app', json=app_payload) + + # Check if the request was successful (HTTP 2xx) + response.raise_for_status() + + # Validate the response + assert response.status_code == 200, f"Failed to create QKD app for {driver.address}: {response.text}" + + response_data = response.json() + assert response_data.get('status') == 'success', "Application creation failed." 
+ + # Log the response from the server + print(f"Server {driver.address} response: {response_data}") + + return server_app_id # Return the created server_app_id + + except HTTPError as e: + pytest.fail(f"HTTP error occurred while creating the QKD application on {driver.address}: {e}") + except Exception as e: + pytest.fail(f"An unexpected error occurred: {e}") + +def test_create_qkd_application_bidirectional(qkd_driver1, qkd_driver3): + """ + Create QKD applications on both qkd1 and qkd3, and validate the complete creation in both directions. + """ + + qkd_driver1.Connect() + qkd_driver3.Connect() + + try: + # Step 1: Create QKD application for qkd1, referencing qkd3 as the backing QKDL + server_app_id_qkd1 = create_qkd_app( + qkd_driver1, + qkdn_id='00000001-0000-0000-0000-000000000000', + backing_qkdl_id=['00000003-0002-0000-0000-000000000000'] # qkd3's QKDL + ) + + # Step 2: Create QKD application for qkd3, referencing qkd1 as the backing QKDL, and setting client_app_id to qkd1's app + create_qkd_app( + qkd_driver3, + qkdn_id='00000003-0000-0000-0000-000000000000', + backing_qkdl_id=['00000003-0002-0000-0000-000000000000'], # qkd3's QKDL + client_app_id=[server_app_id_qkd1] # Set qkd1 as the client + ) + + # Step 3: Fetch applications from both qkd1 and qkd3 to validate that the applications exist + apps_qkd1 = qkd_driver1.GetConfig([RESOURCE_APPS]) + apps_qkd3 = qkd_driver3.GetConfig([RESOURCE_APPS]) + + print(f"QKD1 applications config: {apps_qkd1}") + print(f"QKD3 applications config: {apps_qkd3}") + + # Debugging: Print the full structure of the apps to understand what is returned + for app in apps_qkd1: + print(f"QKD1 App: {app}") + + # Debugging: Print the full structure of the apps to understand what is returned + for app in apps_qkd3: + print(f"QKD3 App: {app}") + + # Step 4: Validate the applications are created using app_id instead of server_app_id + assert any(app[1].get('app_id') == '00000001-0001-0000-0000-000000000000' for app in apps_qkd1), "QKD app not created on qkd1." + assert any(app[1].get('app_id') == '00000003-0001-0000-0000-000000000000' for app in apps_qkd3), "QKD app not created on qkd3." + + print("QKD applications created successfully in both directions.") + + except Exception as e: + pytest.fail(f"An unexpected error occurred: {e}") + finally: + qkd_driver1.Disconnect() + qkd_driver3.Disconnect() diff --git a/src/dlt/connector/client/DltConnectorClientAsync.py b/src/dlt/connector/client/DltConnectorClientAsync.py new file mode 100644 index 0000000000000000000000000000000000000000..b9ed8a4d6e1f3878ad148cac5529bd6908a03a24 --- /dev/null +++ b/src/dlt/connector/client/DltConnectorClientAsync.py @@ -0,0 +1,113 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
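In test_set_new_configuration.py above, the bidirectional test asserts the new applications immediately after creating them. If the node registers applications asynchronously, a short poll is more tolerant of timing. A minimal sketch, assuming the same (resource_key, data) tuple format from GetConfig([RESOURCE_APPS]); the helper name wait_for_app is illustrative only.

import time

def wait_for_app(driver, expected_app_id, retries=10, delay=0.5):
    # Poll GetConfig([RESOURCE_APPS]) until the expected app_id appears or retries run out
    for _ in range(retries):
        apps = driver.GetConfig([RESOURCE_APPS])
        if any(isinstance(data, dict) and data.get('app_id') == expected_app_id for _key, data in apps):
            return True
        time.sleep(delay)
    return False

# Illustrative use inside the test:
# assert wait_for_app(qkd_driver1, '00000001-0001-0000-0000-000000000000'), "QKD app not created on qkd1."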
+ +import grpc, logging, asyncio + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc +from common.proto.context_pb2 import Empty, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId +from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceStub +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class DltConnectorClientAsync: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.DLT) + if not port: port = get_service_port_grpc(ServiceNameEnum.DLT) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) + self.channel = None + self.stub = None + #self.connect() + #LOGGER.debug('Channel created') + + async def connect(self): + self.channel = grpc.aio.insecure_channel(self.endpoint) + self.stub = DltConnectorServiceStub(self.channel) + LOGGER.debug('Channel created') + + async def close(self): + if self.channel is not None: + await self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + async def RecordAll(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAll request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAll(request) + LOGGER.debug('RecordAll result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllDevices(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllDevices request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllDevices(request) + LOGGER.debug('RecordAllDevices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordDevice(self, request: DltDeviceId) -> Empty: + LOGGER.debug('RECORD_DEVICE request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordDevice(request) + LOGGER.debug('RecordDevice result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllLinks(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllLinks request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllLinks(request) + LOGGER.debug('RecordAllLinks result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordLink(self, request: DltLinkId) -> Empty: + LOGGER.debug('RecordLink request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordLink(request) + LOGGER.debug('RecordLink result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllServices(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllServices request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllServices(request) + LOGGER.debug('RecordAllServices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordService(self, 
request: DltServiceId) -> Empty: + LOGGER.debug('RecordService request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordService(request) + LOGGER.debug('RecordService result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllSlices(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllSlices request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllSlices(request) + LOGGER.debug('RecordAllSlices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordSlice(self, request: DltSliceId) -> Empty: + LOGGER.debug('RecordSlice request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordSlice(request) + LOGGER.debug('RecordSlice result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/dlt/connector/client/DltEventsCollector.py b/src/dlt/connector/client/DltEventsCollector.py index e59784a4d2902459d7bc88925e5b83a698770012..ce7d01480ba522a4006ca5d7b02943a157a10d29 100644 --- a/src/dlt/connector/client/DltEventsCollector.py +++ b/src/dlt/connector/client/DltEventsCollector.py @@ -19,7 +19,6 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from dlt.connector.client.DltGatewayClient import DltGatewayClient LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) # This class accepts an event_handler method as attribute that can be used to pre-process and # filter events before they reach the events_queue. Depending on the handler, the supported diff --git a/src/dlt/connector/client/DltGatewayClient.py b/src/dlt/connector/client/DltGatewayClient.py index 31ad4cca22d087f1a8d3d9ce573a6605adf3c1df..21d4df57dd5eaa72eddc147b83b6a66344e20f02 100644 --- a/src/dlt/connector/client/DltGatewayClient.py +++ b/src/dlt/connector/client/DltGatewayClient.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterator + import grpc, logging +from typing import Iterator from common.proto.context_pb2 import Empty, TeraFlowController from common.proto.dlt_gateway_pb2 import ( - DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription) + DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription +) from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string @@ -43,7 +45,8 @@ class DltGatewayClient: self.stub = DltGatewayServiceStub(self.channel) def close(self): - if self.channel is not None: self.channel.close() + if self.channel is not None: + self.channel.close() self.channel = None self.stub = None diff --git a/src/dlt/connector/client/DltGatewayClientAsync.py b/src/dlt/connector/client/DltGatewayClientAsync.py new file mode 100644 index 0000000000000000000000000000000000000000..816241ec587295ffd6a8ef24cca5c66942a849d5 --- /dev/null +++ b/src/dlt/connector/client/DltGatewayClientAsync.py @@ -0,0 +1,85 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio, grpc, logging +from typing import Iterator, List +from common.proto.context_pb2 import Empty, TeraFlowController +from common.proto.dlt_gateway_pb2 import ( + DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription +) +from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string +from dlt.connector.Config import DLT_GATEWAY_HOST, DLT_GATEWAY_PORT + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class DltGatewayClientAsync: + def __init__(self, host=None, port=None): + if not host: host = DLT_GATEWAY_HOST + if not port: port = DLT_GATEWAY_PORT + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) + self.channel = None + self.stub = None + self.message_queue: List[DltRecord] = [] + + async def connect(self): + self.channel = grpc.aio.insecure_channel(self.endpoint) + self.stub = DltGatewayServiceStub(self.channel) + LOGGER.debug('Channel created') + + async def close(self): + if self.channel is not None: + await self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + async def RecordToDlt(self, request : DltRecord) -> DltRecordStatus: + LOGGER.debug('RecordToDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordToDlt(request) + LOGGER.debug('RecordToDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def GetFromDlt(self, request : DltRecordId) -> DltRecord: + LOGGER.debug('GetFromDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.GetFromDlt(request) + LOGGER.debug('GetFromDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SubscribeToDlt(self, request : DltRecordSubscription) -> Iterator[DltRecordEvent]: + LOGGER.debug('SubscribeToDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SubscribeToDlt(request) + LOGGER.debug('SubscribeToDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def GetDltStatus(self, request : TeraFlowController) -> DltPeerStatus: + LOGGER.debug('GetDltStatus request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.GetDltStatus(request) + LOGGER.debug('GetDltStatus result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def GetDltPeers(self, request : Empty) -> DltPeerStatusList: + LOGGER.debug('GetDltPeers request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.GetDltPeers(request) + LOGGER.debug('GetDltPeers result: 
{:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/dlt/connector/service/DltConnectorService.py b/src/dlt/connector/service/DltConnectorService.py index 7e99cb8f85e519ec875a1decc7dc0ad1e030a6f4..601d3e70d298d8ae2a098339e71fc49d66fb79ad 100644 --- a/src/dlt/connector/service/DltConnectorService.py +++ b/src/dlt/connector/service/DltConnectorService.py @@ -14,15 +14,16 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc -from common.tools.service.GenericGrpcService import GenericGrpcService +from common.tools.service.GenericGrpcServiceAsync import GenericGrpcServiceAsync from common.proto.dlt_connector_pb2_grpc import add_DltConnectorServiceServicer_to_server from .DltConnectorServiceServicerImpl import DltConnectorServiceServicerImpl -class DltConnectorService(GenericGrpcService): +class DltConnectorService(GenericGrpcServiceAsync): def __init__(self, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.DLT) super().__init__(port, cls_name=cls_name) self.dltconnector_servicer = DltConnectorServiceServicerImpl() - def install_servicers(self): - add_DltConnectorServiceServicer_to_server(self.dltconnector_servicer, self.server) + async def install_servicers(self): + await self.dltconnector_servicer.initialize() + add_DltConnectorServiceServicer_to_server(self.dltconnector_servicer, self.server) \ No newline at end of file diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py index 42e86b102d3715d68f198d5e341a6f663f35ef4b..c1a4b7b1ea2f978e798d77bfe05cfa3f245283ce 100644 --- a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py +++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py @@ -12,16 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import grpc, logging +import logging +from grpc.aio import ServicerContext from typing import Optional -from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method_async from common.proto.context_pb2 import Empty, TopologyId from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer from common.proto.dlt_gateway_pb2 import DltRecord, DltRecordId, DltRecordOperationEnum, DltRecordTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient -from dlt.connector.client.DltGatewayClient import DltGatewayClient +from dlt.connector.client.DltGatewayClientAsync import DltGatewayClientAsync from .tools.Checkers import record_exists LOGGER = logging.getLogger(__name__) @@ -31,86 +32,91 @@ METRICS_POOL = MetricsPool('DltConnector', 'RPC') class DltConnectorServiceServicerImpl(DltConnectorServiceServicer): def __init__(self): LOGGER.debug('Creating Servicer...') + self.dltgateway_client = DltGatewayClientAsync() LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordAll(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + async def initialize(self): + await self.dltgateway_client.connect() + + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordAll(self, request : TopologyId, context : ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordAllDevices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordAllDevices(self, request : TopologyId, context : ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordDevice(self, request : DltDeviceId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordDevice(self, request : DltDeviceId, context : ServicerContext) -> Empty: data_json = None - if not request.delete: + LOGGER.debug('RECORD_DEVICE = {:s}'.format(grpc_message_to_json_string(request))) + if not request.delete: context_client = ContextClient() device = context_client.GetDevice(request.device_id) data_json = grpc_message_to_json_string(device) - self._record_entity( + await self._record_entity( request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_DEVICE, request.device_id.device_uuid.uuid, request.delete, data_json) return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordAllLinks(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordAllLinks(self, request : TopologyId, context : ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordLink(self, request : DltLinkId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordLink(self, request : DltLinkId, context : ServicerContext) -> Empty: data_json = None + LOGGER.debug('RECORD_LINK = {:s}'.format(grpc_message_to_json_string(request))) + if not request.delete: context_client = ContextClient() link = context_client.GetLink(request.link_id) data_json = 
grpc_message_to_json_string(link) - self._record_entity( + await self._record_entity( request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_LINK, request.link_id.link_uuid.uuid, request.delete, data_json) return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordAllServices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordAllServices(self, request : TopologyId, context : ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordService(self, request : DltServiceId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordService(self, request : DltServiceId, context : ServicerContext) -> Empty: data_json = None if not request.delete: context_client = ContextClient() service = context_client.GetService(request.service_id) data_json = grpc_message_to_json_string(service) - self._record_entity( + await self._record_entity( request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_SERVICE, request.service_id.service_uuid.uuid, request.delete, data_json) return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordAllSlices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordAllSlices(self, request : TopologyId, context : ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def RecordSlice(self, request : DltSliceId, context : grpc.ServicerContext) -> Empty: + @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER) + async def RecordSlice(self, request : DltSliceId, context : ServicerContext) -> Empty: data_json = None if not request.delete: context_client = ContextClient() slice_ = context_client.GetSlice(request.slice_id) data_json = grpc_message_to_json_string(slice_) - self._record_entity( + await self._record_entity( request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_SLICE, request.slice_id.slice_uuid.uuid, request.delete, data_json) return Empty() - def _record_entity( + async def _record_entity( self, dlt_domain_uuid : str, dlt_record_type : DltRecordTypeEnum, dlt_record_uuid : str, delete : bool, data_json : Optional[str] = None ) -> None: - dltgateway_client = DltGatewayClient() - dlt_record_id = DltRecordId() dlt_record_id.domain_uuid.uuid = dlt_domain_uuid # pylint: disable=no-member dlt_record_id.type = dlt_record_type @@ -118,7 +124,7 @@ class DltConnectorServiceServicerImpl(DltConnectorServiceServicer): str_dlt_record_id = grpc_message_to_json_string(dlt_record_id) LOGGER.debug('[_record_entity] sent dlt_record_id = {:s}'.format(str_dlt_record_id)) - dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) + dlt_record = await self.dltgateway_client.GetFromDlt(dlt_record_id) str_dlt_record = grpc_message_to_json_string(dlt_record) LOGGER.debug('[_record_entity] recv dlt_record = {:s}'.format(str_dlt_record)) @@ -131,17 +137,19 @@ class DltConnectorServiceServicerImpl(DltConnectorServiceServicer): dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_DELETE elif not delete and exists: dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE - if data_json is None: raise Exception('data_json must be provided when updating') + if data_json is None: + raise Exception('data_json must be provided when updating') 
dlt_record.data_json = data_json elif not delete and not exists: dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_ADD - if data_json is None: raise Exception('data_json must be provided when adding') + if data_json is None: + raise Exception('data_json must be provided when adding') dlt_record.data_json = data_json else: return str_dlt_record = grpc_message_to_json_string(dlt_record) LOGGER.debug('[_record_entity] sent dlt_record = {:s}'.format(str_dlt_record)) - dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) + dlt_record_status = await self.dltgateway_client.RecordToDlt(dlt_record) str_dlt_record_status = grpc_message_to_json_string(dlt_record_status) LOGGER.debug('[_record_entity] recv dlt_record_status = {:s}'.format(str_dlt_record_status)) diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py index 5e0fb6f878dca4e244274632e8931bc25a4f1319..d1c9461247176f4f8d2849fb4cd594072d2c3a55 100644 --- a/src/dlt/connector/service/__main__.py +++ b/src/dlt/connector/service/__main__.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, signal, sys, threading + +import logging, signal, threading, asyncio +from typing import Optional from prometheus_client import start_http_server from common.Constants import ServiceNameEnum from common.Settings import ( @@ -22,13 +24,13 @@ from .event_dispatcher.DltEventDispatcher import DltEventDispatcher from .DltConnectorService import DltConnectorService terminate = threading.Event() -LOGGER : logging.Logger = None +LOGGER : Optional[logging.Logger] = None def signal_handler(signal, frame): # pylint: disable=redefined-outer-name LOGGER.warning('Terminate signal received') terminate.set() -def main(): +async def main(): global LOGGER # pylint: disable=global-statement log_level = get_log_level() @@ -55,13 +57,14 @@ def main(): # Starting DLT connector service grpc_service = DltConnectorService() - grpc_service.start() + await grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=1.0): pass + while not terminate.is_set(): + await asyncio.sleep(1.0) LOGGER.info('Terminating...') - grpc_service.stop() + await grpc_service.stop() event_dispatcher.stop() event_dispatcher.join() @@ -69,4 +72,4 @@ def main(): return 0 if __name__ == '__main__': - sys.exit(main()) + asyncio.run(main()) diff --git a/src/dlt/connector/service/tools/Checkers.py b/src/dlt/connector/service/tools/Checkers.py index 6ad0f4b82626740c594829831b08fcefbc15096d..5b19afcd24171767c9b1130c8cb9d594b5afa5bf 100644 --- a/src/dlt/connector/service/tools/Checkers.py +++ b/src/dlt/connector/service/tools/Checkers.py @@ -20,5 +20,5 @@ def record_exists(record : DltRecord) -> bool: exists = exists and (record.record_id.type != DLTRECORDTYPE_UNDEFINED) exists = exists and (len(record.record_id.record_uuid.uuid) > 0) #exists = exists and (record.operation != DLTRECORDOPERATION_UNDEFINED) - exists = exists and (len(record.data_json) > 0) + #exists = exists and (len(record.data_json) > 0) # It conflicts as sometimes records do not have a data_json. 
return exists diff --git a/src/dlt/connector/tests/basic.py b/src/dlt/connector/tests/basic.py index da400dcbeed261578929262a6cc0bcc29dc5390e..08c5980889d66318b2221f6c62ff7918f05d1efc 100644 --- a/src/dlt/connector/tests/basic.py +++ b/src/dlt/connector/tests/basic.py @@ -17,12 +17,12 @@ # PYTHONPATH=/home/cttc/teraflow/src python -m dlt.connector.tests.basic import logging, sys, time +from common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device from common.proto.dlt_gateway_pb2 import ( DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, DLTRECORDTYPE_UNDEFINED, DltRecord, DltRecordId) from common.tools.object_factory.Device import json_device from common.tools.grpc.Tools import grpc_message_to_json_string -from src.common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device from ..client.DltGatewayClient import DltGatewayClient from ..client.DltEventsCollector import DltEventsCollector diff --git a/src/dlt/gateway/Dockerfile b/src/dlt/gateway/Dockerfile index 5b888b410360330cdf044c88f5832d1738d546e8..ace9cb22ee691efd7565120034a6a1ef36a27aaf 100644 --- a/src/dlt/gateway/Dockerfile +++ b/src/dlt/gateway/Dockerfile @@ -12,30 +12,34 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM zenika/kotlin:1.4-jdk12 +# Use an official Node.js runtime as a parent image +FROM node:20 -# Make working directory move to it and copy DLT Gateway code -RUN mkdir -p /var/teraflow/dlt/gateway -WORKDIR /var/teraflow/dlt/gateway -COPY src/dlt/gateway/. ./ +# Set the working directory in the container +WORKDIR /usr/dltApp -# Make directory for proto files and copy them -RUN mkdir proto -COPY proto/*.proto ./proto/ +# Create proto directory before copying the .proto files +RUN mkdir -p ./proto -# Build DLT Gateway -RUN ./gradlew build +# Copy package.json and package-lock.json +COPY src/dlt/gateway/dltApp/package*.json ./ +# Copy tsconfig.json +COPY src/dlt/gateway/dltApp/tsconfig*.json ./ +# Copy the proto folder contents +COPY proto/acl.proto ./proto/acl.proto +COPY proto/kpi_sample_types.proto ./proto/kpi_sample_types.proto +COPY proto/context.proto ./proto/context.proto +COPY proto/dlt_gateway.proto ./proto/dlt_gateway.proto + +# Copy the src folder +COPY src/dlt/gateway/dltApp/src/ ./src + +# Install dependencies +RUN npm install + +# Expose the port that the gRPC service runs on EXPOSE 50051 -# Create entrypoint.sh script -RUN echo "#!/bin/sh" > /entrypoint.sh -RUN echo "echo 195.37.154.24 peer0.org1.example.com >> /etc/hosts" >> /entrypoint.sh -RUN echo "echo 195.37.154.24 peer0.org2.example.com >> /etc/hosts" >> /entrypoint.sh -RUN echo "echo 195.37.154.24 orderer0.example.com >> /etc/hosts" >> /entrypoint.sh -RUN echo "cd /var/teraflow/dlt/gateway" >> /entrypoint.sh -RUN echo "./gradlew runServer" >> /entrypoint.sh -RUN chmod +x /entrypoint.sh - -# Gateway entry point -ENTRYPOINT ["sh", "/entrypoint.sh"] +# Command to run the service +CMD ["node", "src/dltGateway.js"] diff --git a/src/dlt/gateway/README.md b/src/dlt/gateway/README.md index 2cf6cfeb1682ade5a77f53fe13c96daed6dc33fd..4a38545eae7492e8ec5abac4fdf209da51b00aff 100644 --- a/src/dlt/gateway/README.md +++ b/src/dlt/gateway/README.md @@ -1,134 +1,35 @@ -``` - NEC Laboratories Europe GmbH - - PROPRIETARY INFORMATION - - The software and its source code contain valuable trade secrets and - shall be maintained in confidence and treated as confidential - information. 
The software may only be used for evaluation and/or - testing purposes, unless otherwise explicitly stated in a written - agreement with NEC Laboratories Europe GmbH. - - Any unauthorized publication, transfer to third parties or - duplication of the object or source code - either totally or in - part - is strictly prohibited. - - Copyright (c) 2022 NEC Laboratories Europe GmbH - All Rights Reserved. - - Authors: Konstantin Munichev <konstantin.munichev@neclab.eu> - - - NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE - WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND - THE ACCOMPANYING DOCUMENTATION. - - NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC - Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR - ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR - LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF - INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL, - INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF - OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe - GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. - ``` +# DLT Gateway -# DLT module guide +## Description -## General information -The DLT module is used to provide access to the underlying Fabric deployment. It allows clients -to add, retrieve, modify and delete blockchain-backed data, essentially working as a key-value -database. External clients should use gRPC API to communicate with this service, its detailed -description available below. +The DLT Gateway consists of a **fabricConnect.ts** TypeScript file, which contains the logic for identification +management (certificates required for the MSP), connection management to the blockchain, and finally, it exposes a +contract object with all the required information for interacting with the chaincode. The **fabricConnect.ts** is +coded following the Fabric Gateway API recommendations from Hyperledger Fabric 2.4+. The compiled **fabricConnect.ts** +logic is imported into a **dltGateway.js** file, which contains the gRPC logic for interaction with the TFS controller. +Testing code for various performance tests is included inside the [/tests](./tests/) folder. -## Code structure -The whole DLT module consists of several packages: -- fabric package -- http package -- proto package -- client example +The chaincode is written in Go, providing a reference for the operations that are recorded in the blockchain. This +chaincode must already be deployed in a working Hyperledger Fabric blockchain. -### Fabric package -The most important class in this package is `FabricConnector`. First, it establishes connection -with the underlying Fabric network using Java Gateway SDK. After that, it could be used as a -CRUD interface. -Other files contain auxiliary code for `FabricConnector` which allows it to register/enroll -users and to obtain smart contract instances. +## Requisites -### Grpc package -Contains server side gRPC handler. It accepts requests from the outside and performs the -requested operation. For the more detailed description see Proto package description right below. 
+* NodeJS +* Docker +* Kubernetes (K8s) + +Sign and TLS certificates, and private key of the MSP user from the Hyperledger Fabric deployment must be copied to the +[/keys](./keys/) directory inside this repository. -### Proto package -The proto package contains `dlt.proto` file which defines gRPC service `DltService` API and messages -it uses. There are 3 main functions: `RecordToDlt` which allows to create/modify/delete data, -`GetFromDlt` which returns already written data and `SubscribeToDlt` which allows clients subscribe -for future create/modify/delete events with provided filters. -Other proto files don't play any significant role and could be safely ignored by end users. +Example: -### Client example -This code is not necessary to the service, but it could be used to test the service. It contains -a sample gRPC client which connects the service and perform all the CRUD operations. +```bash +cp ~/fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/users/User1@org1.example.com/tls/ca.crt src/dlt/gateway/keys/ -# Fabric deployment notes +cp ~/fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/users/User1@org1.example.com/msp/signcerts/User1@org1.example.com-cert.pem src/dlt/gateway/keys/cert.pem -## General notes -Current Fabric deployment uses Fabric test network with some additional helping scripts on top of it. -To start the network just run the `raft.sh` from `blockchain/scripts` directory. Use `stop.sh` -when you need to stop the network. - -## Server start preparations -To run the server it's necessary to copy certificate file -`fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/ca/ca.org1.example.com-cert.pem` -to the config folder (replacing the existing one). Also, it's necessary to copy `scripts/connection-org1.json` -file (again, replacing the old one). After copying, it must be edited. First, all `localhost` entrances -should be replaced with `teraflow.nlehd.de`. Second, `channel` section at the end of the file should be removed. -This should be done after every restart of the Fabric network. - -## Fabric configuration -Even though a test network is easy to deploy and use it's better to perform a custom configuration -for a production deployment. In practice every participating organization will likely prefer to have -its own Peer/Orderer/CA instances to prevent possible dependency on any other participants. This leads -not only to a better privacy/availability/security in general but also to the more complicated -deployment process as a side effect. Here we provide a very brief description of the most important points. - -### Organizations -Organization represents a network participant, which can be an individual, a large corporation or any other -entity. Each organization has its own CAs, orderers and peers. The recommendation here is to create an -organization entity for every independent participant and then decide how many CAs/peers/orderers does -every organization need and which channels should it has access to based on the exact project's goals. - -### Channels -Each channel represents an independent ledger with its own genesis block. Each transaction is executed -on a specific channel, and it's possible to define which organization has access to a given channel. -As a result channels are a pretty powerful privacy mechanism which allows to limit access to the private -data between organization. 
- -### Certificate authorities, peers and orderers -Certificate authorities (CA) are used to generate crypto materials for each organization. Two types of CA -exist: one is used to generate the certificates of the admin, the MSP and certificates of non-admin users. -Another type of CA is used to generate TLS certificates. As a result it's preferable to have at least two -CAs for every organization. - -Peers are entities which host ledgers and smart contracts. They communicate with applications and orderers, -receiving chaincode invocations (proposals), invoking chaincode, updating ledger when necessary and -returning result of execution. Peers can handle one or many ledgers, depending on the configuration. It's -very use case specific how many peers are necessary to the exact deployment. - -Orderers are used to execute a consensus in a distributing network making sure that every channel participant -has the same blocks with the same data. The default consensus algorithm is Raft which provides only a crash -fault tolerance. - -### Conclusion -As you can see, configuration procedure for Fabric is pretty tricky and includes quite a lot of entities. -In real world it will very likely involve participants from multiple organizations each of them performing -its own part of configuration. +cp ~/fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/users/User1@org1.example.com/msp/keystore/priv_sk src/dlt/gateway/keys/ +``` -As a further reading it's recommended to start with the -[official deployment guide](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployment_guide_overview.html). -It contains a high level overview of a deployment process as well as links to the detailed descriptions to -CA/Peer/Orderer configuration descriptions. \ No newline at end of file +These files are essential for establishing the identity and secure connection to the blockchain. Make sure you replace +the paths with your actual file locations from your Hyperledger Fabric deployment. diff --git a/src/dlt/gateway/.gitignore b/src/dlt/gateway/_legacy/.gitignore similarity index 100% rename from src/dlt/gateway/.gitignore rename to src/dlt/gateway/_legacy/.gitignore diff --git a/src/dlt/gateway/_legacy/Dockerfile b/src/dlt/gateway/_legacy/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..5b888b410360330cdf044c88f5832d1738d546e8 --- /dev/null +++ b/src/dlt/gateway/_legacy/Dockerfile @@ -0,0 +1,41 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM zenika/kotlin:1.4-jdk12 + +# Make working directory move to it and copy DLT Gateway code +RUN mkdir -p /var/teraflow/dlt/gateway +WORKDIR /var/teraflow/dlt/gateway +COPY src/dlt/gateway/. 
./ + +# Make directory for proto files and copy them +RUN mkdir proto +COPY proto/*.proto ./proto/ + +# Build DLT Gateway +RUN ./gradlew build + +EXPOSE 50051 + +# Create entrypoint.sh script +RUN echo "#!/bin/sh" > /entrypoint.sh +RUN echo "echo 195.37.154.24 peer0.org1.example.com >> /etc/hosts" >> /entrypoint.sh +RUN echo "echo 195.37.154.24 peer0.org2.example.com >> /etc/hosts" >> /entrypoint.sh +RUN echo "echo 195.37.154.24 orderer0.example.com >> /etc/hosts" >> /entrypoint.sh +RUN echo "cd /var/teraflow/dlt/gateway" >> /entrypoint.sh +RUN echo "./gradlew runServer" >> /entrypoint.sh +RUN chmod +x /entrypoint.sh + +# Gateway entry point +ENTRYPOINT ["sh", "/entrypoint.sh"] diff --git a/src/dlt/gateway/_legacy/README.md b/src/dlt/gateway/_legacy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2cf6cfeb1682ade5a77f53fe13c96daed6dc33fd --- /dev/null +++ b/src/dlt/gateway/_legacy/README.md @@ -0,0 +1,134 @@ +``` + NEC Laboratories Europe GmbH + + PROPRIETARY INFORMATION + + The software and its source code contain valuable trade secrets and + shall be maintained in confidence and treated as confidential + information. The software may only be used for evaluation and/or + testing purposes, unless otherwise explicitly stated in a written + agreement with NEC Laboratories Europe GmbH. + + Any unauthorized publication, transfer to third parties or + duplication of the object or source code - either totally or in + part - is strictly prohibited. + + Copyright (c) 2022 NEC Laboratories Europe GmbH + All Rights Reserved. + + Authors: Konstantin Munichev <konstantin.munichev@neclab.eu> + + + NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE + WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND + THE ACCOMPANYING DOCUMENTATION. + + NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC + Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR + ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR + LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF + INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL, + INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF + OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe + GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + + THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. + ``` + +# DLT module guide + +## General information +The DLT module is used to provide access to the underlying Fabric deployment. It allows clients +to add, retrieve, modify and delete blockchain-backed data, essentially working as a key-value +database. External clients should use gRPC API to communicate with this service, its detailed +description available below. + +## Code structure +The whole DLT module consists of several packages: +- fabric package +- http package +- proto package +- client example + +### Fabric package +The most important class in this package is `FabricConnector`. First, it establishes connection +with the underlying Fabric network using Java Gateway SDK. After that, it could be used as a +CRUD interface. +Other files contain auxiliary code for `FabricConnector` which allows it to register/enroll +users and to obtain smart contract instances. + +### Grpc package +Contains server side gRPC handler. It accepts requests from the outside and performs the +requested operation. 
For the more detailed description see Proto package description right below. + +### Proto package +The proto package contains `dlt.proto` file which defines gRPC service `DltService` API and messages +it uses. There are 3 main functions: `RecordToDlt` which allows to create/modify/delete data, +`GetFromDlt` which returns already written data and `SubscribeToDlt` which allows clients subscribe +for future create/modify/delete events with provided filters. +Other proto files don't play any significant role and could be safely ignored by end users. + +### Client example +This code is not necessary to the service, but it could be used to test the service. It contains +a sample gRPC client which connects the service and perform all the CRUD operations. + +# Fabric deployment notes + +## General notes +Current Fabric deployment uses Fabric test network with some additional helping scripts on top of it. +To start the network just run the `raft.sh` from `blockchain/scripts` directory. Use `stop.sh` +when you need to stop the network. + +## Server start preparations +To run the server it's necessary to copy certificate file +`fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/ca/ca.org1.example.com-cert.pem` +to the config folder (replacing the existing one). Also, it's necessary to copy `scripts/connection-org1.json` +file (again, replacing the old one). After copying, it must be edited. First, all `localhost` entrances +should be replaced with `teraflow.nlehd.de`. Second, `channel` section at the end of the file should be removed. +This should be done after every restart of the Fabric network. + +## Fabric configuration +Even though a test network is easy to deploy and use it's better to perform a custom configuration +for a production deployment. In practice every participating organization will likely prefer to have +its own Peer/Orderer/CA instances to prevent possible dependency on any other participants. This leads +not only to a better privacy/availability/security in general but also to the more complicated +deployment process as a side effect. Here we provide a very brief description of the most important points. + +### Organizations +Organization represents a network participant, which can be an individual, a large corporation or any other +entity. Each organization has its own CAs, orderers and peers. The recommendation here is to create an +organization entity for every independent participant and then decide how many CAs/peers/orderers does +every organization need and which channels should it has access to based on the exact project's goals. + +### Channels +Each channel represents an independent ledger with its own genesis block. Each transaction is executed +on a specific channel, and it's possible to define which organization has access to a given channel. +As a result channels are a pretty powerful privacy mechanism which allows to limit access to the private +data between organization. + +### Certificate authorities, peers and orderers +Certificate authorities (CA) are used to generate crypto materials for each organization. Two types of CA +exist: one is used to generate the certificates of the admin, the MSP and certificates of non-admin users. +Another type of CA is used to generate TLS certificates. As a result it's preferable to have at least two +CAs for every organization. + +Peers are entities which host ledgers and smart contracts. 
They communicate with applications and orderers, +receiving chaincode invocations (proposals), invoking chaincode, updating ledger when necessary and +returning result of execution. Peers can handle one or many ledgers, depending on the configuration. It's +very use case specific how many peers are necessary to the exact deployment. + +Orderers are used to execute a consensus in a distributing network making sure that every channel participant +has the same blocks with the same data. The default consensus algorithm is Raft which provides only a crash +fault tolerance. + +### Conclusion +As you can see, configuration procedure for Fabric is pretty tricky and includes quite a lot of entities. +In real world it will very likely involve participants from multiple organizations each of them performing +its own part of configuration. + +As a further reading it's recommended to start with the +[official deployment guide](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployment_guide_overview.html). +It contains a high level overview of a deployment process as well as links to the detailed descriptions to +CA/Peer/Orderer configuration descriptions. \ No newline at end of file diff --git a/src/dlt/gateway/build.gradle.kts b/src/dlt/gateway/_legacy/build.gradle.kts similarity index 100% rename from src/dlt/gateway/build.gradle.kts rename to src/dlt/gateway/_legacy/build.gradle.kts diff --git a/src/dlt/gateway/config/ca.org1.example.com-cert.pem b/src/dlt/gateway/_legacy/config/ca.org1.example.com-cert.pem similarity index 100% rename from src/dlt/gateway/config/ca.org1.example.com-cert.pem rename to src/dlt/gateway/_legacy/config/ca.org1.example.com-cert.pem diff --git a/src/dlt/gateway/config/connection-org1.json b/src/dlt/gateway/_legacy/config/connection-org1.json similarity index 100% rename from src/dlt/gateway/config/connection-org1.json rename to src/dlt/gateway/_legacy/config/connection-org1.json diff --git a/src/dlt/gateway/gradle.properties b/src/dlt/gateway/_legacy/gradle.properties similarity index 100% rename from src/dlt/gateway/gradle.properties rename to src/dlt/gateway/_legacy/gradle.properties diff --git a/src/dlt/gateway/gradle/wrapper/gradle-wrapper.jar b/src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.jar similarity index 100% rename from src/dlt/gateway/gradle/wrapper/gradle-wrapper.jar rename to src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.jar diff --git a/src/dlt/gateway/gradle/wrapper/gradle-wrapper.properties b/src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.properties similarity index 100% rename from src/dlt/gateway/gradle/wrapper/gradle-wrapper.properties rename to src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.properties diff --git a/src/dlt/gateway/gradlew b/src/dlt/gateway/_legacy/gradlew similarity index 100% rename from src/dlt/gateway/gradlew rename to src/dlt/gateway/_legacy/gradlew diff --git a/src/dlt/gateway/gradlew.bat b/src/dlt/gateway/_legacy/gradlew.bat similarity index 100% rename from src/dlt/gateway/gradlew.bat rename to src/dlt/gateway/_legacy/gradlew.bat diff --git a/src/dlt/gateway/settings.gradle.kts b/src/dlt/gateway/_legacy/settings.gradle.kts similarity index 100% rename from src/dlt/gateway/settings.gradle.kts rename to src/dlt/gateway/_legacy/settings.gradle.kts diff --git a/src/dlt/gateway/src/main/kotlin/Main.kt b/src/dlt/gateway/_legacy/src/main/kotlin/Main.kt similarity index 100% rename from src/dlt/gateway/src/main/kotlin/Main.kt rename to src/dlt/gateway/_legacy/src/main/kotlin/Main.kt diff --git 
a/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/ConnectGateway.kt similarity index 100% rename from src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/ConnectGateway.kt diff --git a/src/dlt/gateway/src/main/kotlin/fabric/EnrollAdmin.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/EnrollAdmin.kt similarity index 100% rename from src/dlt/gateway/src/main/kotlin/fabric/EnrollAdmin.kt rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/EnrollAdmin.kt diff --git a/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/FabricConnector.kt similarity index 100% rename from src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/FabricConnector.kt diff --git a/src/dlt/gateway/src/main/kotlin/fabric/RegisterUser.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/RegisterUser.kt similarity index 100% rename from src/dlt/gateway/src/main/kotlin/fabric/RegisterUser.kt rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/RegisterUser.kt diff --git a/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt b/src/dlt/gateway/_legacy/src/main/kotlin/grpc/FabricServer.kt similarity index 100% rename from src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt rename to src/dlt/gateway/_legacy/src/main/kotlin/grpc/FabricServer.kt diff --git a/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt b/src/dlt/gateway/_legacy/src/main/kotlin/grpc/GrpcHandler.kt similarity index 100% rename from src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt rename to src/dlt/gateway/_legacy/src/main/kotlin/grpc/GrpcHandler.kt diff --git a/src/dlt/gateway/src/main/kotlin/proto/Config.proto b/src/dlt/gateway/_legacy/src/main/kotlin/proto/Config.proto similarity index 100% rename from src/dlt/gateway/src/main/kotlin/proto/Config.proto rename to src/dlt/gateway/_legacy/src/main/kotlin/proto/Config.proto diff --git a/src/dlt/gateway/chaincode/chaincode.go b/src/dlt/gateway/chaincode/chaincode.go new file mode 100644 index 0000000000000000000000000000000000000000..878cbe9423a7d2cac31e1857343dcdc61034180d --- /dev/null +++ b/src/dlt/gateway/chaincode/chaincode.go @@ -0,0 +1,200 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "log" + + "github.com/hyperledger/fabric-contract-api-go/contractapi" +) + +// Protobuf definitions +type Uuid struct { + Uuid string `json:"uuid"` +} + +type DltRecordId struct { + DomainUuid Uuid `json:"domain_uuid"` + Type string `json:"type"` + RecordUuid Uuid `json:"record_uuid"` +} + +type DltRecord struct { + RecordId DltRecordId `json:"record_id"` + DataJson string `json:"data_json"` +} + +type SmartContract struct { + contractapi.Contract +} + +// InitLedger activates the chaincode +func (s *SmartContract) InitLedger(ctx contractapi.TransactionContextInterface) error { + return nil +} + +func (s *SmartContract) StoreRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId, dataJson string) error { + + key, err := createHashKey(recordId) + if err != nil { + return fmt.Errorf("failed to create hash key: %v", err) + } + + // Check if the same record does not exist before adding it to the ledger + exists, err := s.RecordExists(ctx, key) + if err == nil && exists != nil { + return fmt.Errorf("the record %s already exists", key) + } + + // Trigger an event if the transaction is successful + storedRecord := DltRecord{ + RecordId: recordId, + DataJson: dataJson, + } + eventJson, err := json.Marshal(storedRecord) + if err != nil { + return fmt.Errorf("failed to marshal stored record: %v", err) + } + ctx.GetStub().SetEvent("StoreRecord", eventJson) + + // Store the record in the ledger + return ctx.GetStub().PutState(key, []byte(dataJson)) +} + +func (s *SmartContract) RetrieveRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId) (string, error) { + key, err := createHashKey(recordId) + if err != nil { + return "", fmt.Errorf("failed to create hash key: %v", err) + } + + // Get the record from the ledger + dataBytes, err := ctx.GetStub().GetState(key) + if err != nil || dataBytes == nil { + return "", fmt.Errorf("data not found for key %s", key) + } + return string(dataBytes), nil +} + +func (s *SmartContract) UpdateRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId, dataJson string) error { + key, err := createHashKey(recordId) + if err != nil { + return fmt.Errorf("failed to create hash key: %v", err) + } + + // Check if the record exists before updating it + _, err = s.RecordExists(ctx, key) + if err != nil { + return err + } + + // Trigger an event if the transaction is successful + eventData := DltRecord{RecordId: recordId, DataJson: dataJson} + eventJson, err := json.Marshal(eventData) + if err != nil { + return fmt.Errorf("failed to marshal event data: %v", err) + } + ctx.GetStub().SetEvent("UpdateRecord", eventJson) + + // Update the record in the ledger + return ctx.GetStub().PutState(key, []byte(dataJson)) +} + +func (s *SmartContract) DeleteRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId) error { + key, err := createHashKey(recordId) + if err != nil { + return fmt.Errorf("failed to create hash key: %v", err) + } + + // Check if the record exists before deleting it + exists, err := s.RecordExists(ctx, key) + if err != nil { + return err + } + + // Trigger an event if the transaction is successful + eventData := DltRecord{RecordId: recordId, DataJson: string(exists)} + eventJson, err := json.Marshal(eventData) + if err != nil { + return fmt.Errorf("failed to marshal event data: %v", err) + } + ctx.GetStub().SetEvent("DeleteRecord", eventJson) + + // Delete the record from the ledger + return 
ctx.GetStub().DelState(key) +} + +func (s *SmartContract) RecordExists(ctx contractapi.TransactionContextInterface, key string) ([]byte, error) { + jsonData, err := ctx.GetStub().GetState(key) + if err != nil { + return nil, fmt.Errorf("failed to read from world state: %v", err) + } + if jsonData == nil { + return nil, fmt.Errorf("the record %s does not exist", key) + } + return jsonData, nil +} + +func (s *SmartContract) GetAllRecords(ctx contractapi.TransactionContextInterface) ([]map[string]interface{}, error) { + // Range query with empty string for startKey and endKey does an + // open-ended query of all records in the chaincode namespace. + resultsIterator, err := ctx.GetStub().GetStateByRange("", "") + if err != nil { + return nil, err + } + defer resultsIterator.Close() + + var records []map[string]interface{} + for resultsIterator.HasNext() { + queryResponse, err := resultsIterator.Next() + if err != nil { + return nil, err + } + + var generic map[string]interface{} + if err := json.Unmarshal(queryResponse.Value, &generic); err != nil { + return nil, fmt.Errorf("invalid JSON data: %v", err) + } + + records = append(records, generic) + } + + return records, nil +} + +func createHashKey(recordId DltRecordId) (string, error) { + recordIdJson, err := json.Marshal(recordId) + if err != nil { + return "", fmt.Errorf("failed to marshal record ID: %v", err) + } + + hash := sha256.New() + hash.Write(recordIdJson) + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func main() { + recordChaincode, err := contractapi.NewChaincode(&SmartContract{}) + if err != nil { + log.Panicf("Error creating chaincode: %v", err) + } + + if err := recordChaincode.Start(); err != nil { + log.Panicf("Error starting chaincode: %v", err) + } +} diff --git a/src/dlt/gateway/chaincode/go.mod b/src/dlt/gateway/chaincode/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..27f86fbabd3d9b169f485ab22ccc42125421afa6 --- /dev/null +++ b/src/dlt/gateway/chaincode/go.mod @@ -0,0 +1,46 @@ +<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
--> + +module chaincode + +go 1.17 + +require github.com/hyperledger/fabric-contract-api-go v1.2.1 + +require ( + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/spec v0.20.8 // indirect + github.com/go-openapi/swag v0.21.1 // indirect + github.com/gobuffalo/envy v1.10.1 // indirect + github.com/gobuffalo/packd v1.0.1 // indirect + github.com/gobuffalo/packr v1.30.1 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/hyperledger/fabric-chaincode-go v0.0.0-20230228194215-b84622ba6a7a // indirect + github.com/hyperledger/fabric-protos-go v0.3.0 // indirect + github.com/joho/godotenv v1.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/grpc v1.53.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/src/dlt/gateway/chaincode/go.sum b/src/dlt/gateway/chaincode/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..8bb9581b596c7f9dd4528d14f5051aa03132421f --- /dev/null +++ b/src/dlt/gateway/chaincode/go.sum @@ -0,0 +1,1240 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go 
v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= 
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod 
h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= 
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= 
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod 
h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod 
h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= 
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod 
h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod 
h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cucumber/gherkin-go/v19 v19.0.3/go.mod h1:jY/NP6jUtRSArQQJ5h1FXOUgk5fZK24qtE7vKi776Vw= +github.com/cucumber/godog v0.12.6/go.mod h1:Y02TTpimPXDb70PnG6M3zpODXm1+bjCsuZzcW76xAww= +github.com/cucumber/messages-go/v16 v16.0.0/go.mod h1:EJcyR5Mm5ZuDsKJnT2N9KRnBK30BGjtYotDKpwQ0v6g= +github.com/cucumber/messages-go/v16 v16.0.1/go.mod h1:EJcyR5Mm5ZuDsKJnT2N9KRnBK30BGjtYotDKpwQ0v6g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.10.1 h1:ppDLoXv2feQ5nus4IcgtyMdHQkKng2lhJCIm33cblM0= +github.com/gobuffalo/envy v1.10.1/go.mod h1:AWx4++KnNOW3JOeEvhSaq+mvgAvnMYOY1XSIin4Mago= +github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= +github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= +github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr v1.30.1 h1:hu1fuVR3fXEZR7rXNW3h8rqSML8EVAf6KNm0NKO/wKg= +github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= +github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod 
h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g= +github.com/hashicorp/go-memdb v1.3.3/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hyperledger/fabric-chaincode-go v0.0.0-20230228194215-b84622ba6a7a h1:HwSCxEeiBthwcazcAykGATQ36oG9M+HEQvGLvB7aLvA= +github.com/hyperledger/fabric-chaincode-go v0.0.0-20230228194215-b84622ba6a7a/go.mod h1:TDSu9gxURldEnaGSFbH1eMlfSQBWQcMQfnDBcpQv5lU= +github.com/hyperledger/fabric-contract-api-go v1.2.1 h1:Ww9cKH/qHl5s6WqF+Ts5ju5eaBxC/awB/BJE+rOsEkM= +github.com/hyperledger/fabric-contract-api-go v1.2.1/go.mod h1:BhWve0gz1iH+Xc+cO3rmeIZI7YaTWOQodka9CgeUOgo= 
+github.com/hyperledger/fabric-protos-go v0.3.0 h1:MXxy44WTMENOh5TI8+PCK2x6pMj47Go2vFRKDHB2PZs= +github.com/hyperledger/fabric-protos-go v0.3.0/go.mod h1:WWnyWP40P2roPmmvxsUXSvVI/CF6vwY1K1UFidnKBys= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= 
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api 
v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod 
h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod 
h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/src/dlt/gateway/dltApp/package-lock.json b/src/dlt/gateway/dltApp/package-lock.json new file mode 100644 index 0000000000000000000000000000000000000000..6fb245621a1e38f84d6806aae2f94fc83394b05a --- /dev/null +++ b/src/dlt/gateway/dltApp/package-lock.json @@ -0,0 +1,2420 @@ +{ + "name": "dlt_gateway", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "dlt_gateway", + "version": "1.0.0", + "license": "Apache-2.0", + "dependencies": { + "@grpc/grpc-js": "^1.10.8", + "@grpc/proto-loader": "^0.7.13", + "@hyperledger/fabric-gateway": "~1.4.0", + "dotenv": "^16.4.5", + "grpc-tools": "^1.12.4", + "protobufjs": "^7.3.0", + "uuid": "^9.0.1" + }, + "devDependencies": { + "@tsconfig/node18": "^18.2.2", + "@types/node": "^18.18.6", + "@typescript-eslint/eslint-plugin": "^6.9.0", + "@typescript-eslint/parser": "^6.9.0", + "eslint": "^8.52.0", + "typescript": "~5.2.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", + "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + 
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", + "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@grpc/grpc-js": { + "version": "1.10.8", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.10.8.tgz", + "integrity": "sha512-vYVqYzHicDqyKB+NQhAc54I1QWCBLCrYG6unqOIcBTHx+7x8C9lcoLj3KVJXs2VB4lUbpWY+Kk9NipcbXYWmvg==", + "dependencies": { + "@grpc/proto-loader": "^0.7.13", + "@js-sdsl/ordered-map": "^4.4.2" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "node_modules/@grpc/proto-loader": { + "version": "0.7.13", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.13.tgz", + "integrity": "sha512-AiXO/bfe9bmxBjxxtYxFAXGZvMaN5s8kO+jBHAJCON8rJoB5YS/D6X7ZNc6XQkuHNmyl4CYaMI1fJ/Gn27RGGw==", + "dependencies": { + "lodash.camelcase": "^4.3.0", + "long": "^5.0.0", + "protobufjs": "^7.2.5", + "yargs": "^17.7.2" + }, + "bin": { + "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + 
"version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "dev": true + }, + "node_modules/@hyperledger/fabric-gateway": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@hyperledger/fabric-gateway/-/fabric-gateway-1.4.0.tgz", + "integrity": "sha512-dJ0eJdGBo8wtZ/oR5mADHnllp+pSuVOI7uq5fRFf0NTVk1SzlX42Q3kt4j53bJQaxd21TMvofgXNO+BCgJcB/A==", + "dependencies": { + "@grpc/grpc-js": "^1.9.0", + "@hyperledger/fabric-protos": "^0.2.0", + "asn1.js": "^5.4.1", + "bn.js": "^5.2.1", + "elliptic": "^6.5.4", + "google-protobuf": "^3.21.0" + }, + "engines": { + "node": ">=18.12.0" + }, + "optionalDependencies": { + "pkcs11js": "^1.3.0" + } + }, + "node_modules/@hyperledger/fabric-protos": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@hyperledger/fabric-protos/-/fabric-protos-0.2.1.tgz", + "integrity": "sha512-qjm0vIQIfCall804tWDeA8p/mUfu14sl5Sj+PbOn2yDKJq+7ThoIhNsLAqf+BCxUfqsoqQq6AojhqQeTFyOOqg==", + "dependencies": { + "@grpc/grpc-js": "^1.9.0", + "google-protobuf": "^3.21.0" + }, + "engines": { + "node": ">=14.15.0" + } + }, + "node_modules/@js-sdsl/ordered-map": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", + "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "node_modules/@mapbox/node-pre-gyp": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz", + "integrity": "sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==", + "dependencies": { + "detect-libc": "^2.0.0", + "https-proxy-agent": "^5.0.0", + "make-dir": "^3.1.0", + "node-fetch": "^2.6.7", + "nopt": "^5.0.0", + "npmlog": "^5.0.1", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.11" + }, + "bin": { + "node-pre-gyp": "bin/node-pre-gyp" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + }, + 
"node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + }, + "node_modules/@tsconfig/node18": { + "version": "18.2.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node18/-/node18-18.2.4.tgz", + "integrity": "sha512-5xxU8vVs9/FNcvm3gE07fPbn9tl6tqGGWA9tSlwsUEkBxtRnTsNmwrV8gasZ9F/EobaSv9+nu8AxUKccw77JpQ==", + "dev": true + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, + "node_modules/@types/node": { + "version": "18.19.33", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.33.tgz", + "integrity": "sha512-NR9+KrpSajr2qBVp/Yt5TU/rp+b5Mayi3+OlMlcg2cVCfRmcG5PWZ7S4+MG9PZ5gWBoc9Pd0BKSRViuBCRPu0A==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/semver": { + "version": "7.5.8", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", + "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", + 
"dev": true + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || 
>=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true + }, + "node_modules/abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" + }, + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/agent-base": { + "version": "6.0.2", + 
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/aproba": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz", + "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==" + }, + "node_modules/are-we-there-yet": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz", + "integrity": "sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==", + "deprecated": "This package is no longer supported.", + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/asn1.js": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", + "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", + "dependencies": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0", + "safer-buffer": "^2.1.0" + } + }, + "node_modules/asn1.js/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": 
"sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/bn.js": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", + "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==" + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==" + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "bin": { + "color-support": "bin.js" + } + }, + 
"node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==" + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==" + }, + "node_modules/detect-libc": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dotenv": { + "version": "16.4.5", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz", + "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/elliptic": { + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.5.tgz", + "integrity": "sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==", + "dependencies": { + "bn.js": "^4.11.9", + "brorand": "^1.1.0", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.1", + "inherits": "^2.0.4", + "minimalistic-assert": "^1.0.1", + 
"minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/elliptic/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", + "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + 
"dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "dev": true + }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fs-minipass/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": 
"sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/gauge": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/gauge/-/gauge-3.0.2.tgz", + "integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==", + "deprecated": "This package is no longer supported.", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.2", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.1", + "object-assign": "^4.1.1", + "signal-exit": "^3.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": 
"sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/google-protobuf": { + "version": "3.21.2", + "resolved": "https://registry.npmjs.org/google-protobuf/-/google-protobuf-3.21.2.tgz", + "integrity": "sha512-3MSOYFO5U9mPGikIYCzK0SaThypfGgS6bHqrUGXG3DPHCrb+txNqeEcns1W0lkGfk0rCyNXm7xB9rMxnCiZOoA==" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/grpc-tools": { + "version": "1.12.4", + "resolved": "https://registry.npmjs.org/grpc-tools/-/grpc-tools-1.12.4.tgz", + "integrity": "sha512-5+mLAJJma3BjnW/KQp6JBjUMgvu7Mu3dBvBPd1dcbNIb+qiR0817zDpgPjS7gRb+l/8EVNIa3cB02xI9JLToKg==", + "hasInstallScript": true, + "dependencies": { + "@mapbox/node-pre-gyp": "^1.0.5" + }, + "bin": { + "grpc_tools_node_protoc": "bin/protoc.js", + "grpc_tools_node_protoc_plugin": "bin/protoc_plugin.js" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==" + }, + "node_modules/hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "dependencies": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "node_modules/hmac-drbg": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==", + "dependencies": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + 
"dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-schema-traverse": { + "version": 
"0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/long": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", + "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": 
"https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==" + }, + "node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minizlib/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/nan": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.19.0.tgz", + "integrity": "sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw==", + "optional": true + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": 
"sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/nopt": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", + "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npmlog": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-5.0.1.tgz", + "integrity": "sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==", + "deprecated": "This package is no longer supported.", + "dependencies": { + "are-we-there-yet": "^2.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^3.0.0", + "set-blocking": "^2.0.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": 
"sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkcs11js": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/pkcs11js/-/pkcs11js-1.3.1.tgz", + "integrity": "sha512-eo7fTeQwYGzX1pFmRaf4ji/WcDW2XKpwqylOwzutsjNWECv6G9PzDHj3Yj5dX9EW/fydMnJG8xvWj/btnQT9TA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.15.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/PeculiarVentures" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/protobufjs": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.3.0.tgz", + "integrity": "sha512-YWD03n3shzV9ImZRX3ccbjqLxj7NokGN0V/ESiBV5xWqrommYHYiihuIyavq03pWSGqlyvYUFmfoMKd+1rPA/g==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, 
+ { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/semver": { + "version": "7.6.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.1.tgz", + "integrity": "sha512-f/vbBsu+fOiYt+lmwZV0rVwJScl46HppnOA1ZvIuBWKOTlllpyJ3bfVax76/OrhCH38dyxoDIA8K7uB963IYgA==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-blocking": { + "version": 
"2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + 
"minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/ts-api-utils": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", + "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==", + "dev": true, + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + 
"https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wide-align": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "dependencies": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": 
"sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/src/dlt/gateway/dltApp/package.json b/src/dlt/gateway/dltApp/package.json new file mode 100644 index 0000000000000000000000000000000000000000..9d29b5287e1d3085c1cede9ab73b23189f3228d3 --- /dev/null +++ b/src/dlt/gateway/dltApp/package.json @@ -0,0 +1,38 @@ +{ + "name": "dlt_gateway", + "version": "1.0.0", + "description": "A DLT application that record and manages network related data as JSON, implemented in typeScript using HLF fabric-gateway", + "main": "dist/index.js", + "typings": "dist/index.d.ts", + "engines": { + "node": ">=18" + }, + "scripts": { + "build": "tsc", + "build:watch": "tsc -w", + "lint": "eslint . --ext .ts", + "prepare": "npm run build", + "pretest": "npm run lint", + "start": "node dist/dlt_gateway.js" + }, + "engineStrict": true, + "author": "Javier Jose Diaz (CTTC)", + "license": "Apache-2.0", + "dependencies": { + "@grpc/grpc-js": "^1.10.8", + "@grpc/proto-loader": "^0.7.13", + "@hyperledger/fabric-gateway": "~1.4.0", + "dotenv": "^16.4.5", + "grpc-tools": "^1.12.4", + "protobufjs": "^7.3.0", + "uuid": "^9.0.1" + }, + "devDependencies": { + "@tsconfig/node18": "^18.2.2", + "@types/node": "^18.18.6", + "@typescript-eslint/eslint-plugin": "^6.9.0", + "@typescript-eslint/parser": "^6.9.0", + "eslint": "^8.52.0", + "typescript": "~5.2.2" + } +} diff --git a/src/dlt/gateway/dltApp/src/dltGateway.js b/src/dlt/gateway/dltApp/src/dltGateway.js new file mode 100644 index 0000000000000000000000000000000000000000..656397dc88bfd8289b53e72a8d1dbde9eab32594 --- /dev/null +++ b/src/dlt/gateway/dltApp/src/dltGateway.js @@ -0,0 +1,262 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const path = require('path'); +const { connectToNetwork } = require('../dist/fabricConnect'); +const utf8Decoder = new TextDecoder(); + +// Load the protocol buffer definitions +const PROTO_PATH = path.resolve(__dirname, '../proto/dlt_gateway.proto'); +const packageDefinition = protoLoader.loadSync(PROTO_PATH, { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, +}); +const dltProto = grpc.loadPackageDefinition(packageDefinition).dlt; + +// Create a gRPC server instance +const server = new grpc.Server(); +let contractInstance = null; +let closeConnection = null; +let events = null; // To store the events iterable + +const clients = new Set(); // Set to keep track of active client streams + + + +// Initialize connection to the chaincode + +async function initChaincodeConnection() { + try { + const networkResources = await connectToNetwork(); + contractInstance = networkResources.contract; + events = networkResources.events; // Initiate event listening + closeConnection = networkResources.close; + + //console.log("DEBUG", events) + console.log("Chaincode connection established successfully."); + } catch (error) { + console.error('Failed to establish chaincode connection:', error); + process.exit(1); // Exit if the connection cannot be established + } +} + +// gRPC method to handle recording data to the DLT +async function recordToDlt(call, callback) { + if (!contractInstance) { + callback({ + code: grpc.status.UNAVAILABLE, + details: "Chaincode connection is not established." + }); + return; + } + const { record_id, operation, data_json } = call.request; + try { + + + console.log(`Operation requested: ${operation}`); + + switch (operation) { + case 'DLTRECORDOPERATION_ADD': + await contractInstance.submitTransaction('StoreRecord', JSON.stringify(record_id), data_json); + break; + case 'DLTRECORDOPERATION_UPDATE': + await contractInstance.submitTransaction('UpdateRecord', JSON.stringify(record_id), data_json); + break; + case 'DLTRECORDOPERATION_DELETE': + await contractInstance.submitTransaction('DeleteRecord', JSON.stringify(record_id)); + break; + default: + throw new Error('Invalid operation'); + } + // Send success response + callback(null, { record_id, status: 'DLTRECORDSTATUS_SUCCEEDED' }); + } catch (error) { + // Send failure response with error message + console.log("ERRROR", error) + callback(null, { record_id, status: 'DLTRECORDSTATUS_FAILED', error_message: error.message }); + } +} + +// gRPC method to fetch data from the DLT +async function getFromDlt(call, callback) { + + if (!contractInstance) { + callback({ + code: grpc.status.UNAVAILABLE, + details: "Chaincode connection is not established." 
+ }); + return; + } + + try { + + console.log("RECEIVED CALL REQUEST:", call.request); + //const { record_id, operation, data_json } = call.request; + const resultBytes = await contractInstance.evaluateTransaction('RetrieveRecord', JSON.stringify(call.request)); + // Decode and parse the result + const resultJson = utf8Decoder.decode(resultBytes); + const result = JSON.parse(resultJson); + + // Send the response with the formatted JSON data + callback(null, { record_id: call.request, operation: result.operation, data_json: result.data_json }); + } catch (error) { + if (error.message.includes("data not found for key")) { + // Return an empty response when no record is found + console.log("REQUEST ERROR:", error); + const emptyRecordId = { + domain_uuid: { uuid: "" }, + type: 'DLTRECORDTYPE_UNDEFINED', + record_uuid: { uuid: "" } + }; + callback(null, { record_id: emptyRecordId, data_json: "" }); + } else { + // Send failure response with error message + callback({ + code: grpc.status.UNKNOWN, + details: error.message + }); + } + } +} + +// Implement subscription to DLT events + +const eventNameToEventTypeEnum = { + 'StoreRecord': 'EVENTTYPE_CREATE', + 'UpdateRecord': 'EVENTTYPE_UPDATE', + 'DeleteRecord': 'EVENTTYPE_REMOVE' +}; + +function subscribeToDlt(call) { + if (!events) { + call.emit('error', { + code: grpc.status.UNAVAILABLE, + details: "Event listener is not established." + }); + return; + } + + // Add the client to the set of active clients + clients.add(call); + console.log(`Client connected. Total clients: ${clients.size}`); + + // Clean up when the client disconnects + call.on('cancelled', () => { + clients.delete(call); + console.log(`Client disconnected (cancelled). Total clients: ${clients.size}`); + + }); + call.on('error', (err) => { + clients.delete(call); + console.log(`Client disconnected (error: ${err.message}). Total clients: ${clients.size}`); + + }); + call.on('end', () => { + clients.delete(call); + console.log(`Client disconnected (end). 
Total clients: ${clients.size}`); + + }); + + (async () => { + try { + for await (const event of events) { + const eventPayload = event.payload; + //console.log("Raw event payload:", eventPayload); + const resultJson = utf8Decoder.decode(eventPayload); + const eventJson = JSON.parse(resultJson); + + console.log("Writing event to stream:", eventJson.record_id); + + const eventType = eventNameToEventTypeEnum[event.eventName] || 'EVENTTYPE_UNDEFINED'; + + for (const client of clients) { + const writeSuccessful = client.write({ + event: { + timestamp: { timestamp: Math.floor(Date.now() / 1000) }, + event_type: eventType // Set appropriate event type + }, + record_id: { + domain_uuid: { uuid: eventJson.record_id.domain_uuid.uuid }, + type: eventJson.record_id.type || 'DLTRECORDTYPE_UNDEFINED', + record_uuid: { uuid: eventJson.record_id.record_uuid.uuid } + } + }); + + // Check if the internal buffer is full + if (!writeSuccessful) { + // Wait for the 'drain' event before continuing + await new Promise((resolve) => client.once('drain', resolve)); + } + } + } + + //call.end(); + + } catch (error) { + for (const client of clients) { + client.emit('error', { + code: grpc.status.UNKNOWN, + details: `Error processing event: ${error.message}` + }); + + } + } + })(); +} + +// Placeholder +function getDltStatus(call, callback) { + // Implement status fetching logic here + // Not implemented for simplicity +} + +// Placeholder +function getDltPeers(call, callback) { + // Implement peers fetching logic here + // Not implemented for simplicity +} + +// Add the service to the server +server.addService(dltProto.DltGatewayService.service, { + RecordToDlt: recordToDlt, + GetFromDlt: getFromDlt, + SubscribeToDlt: subscribeToDlt, + GetDltStatus: getDltStatus, + GetDltPeers: getDltPeers, +}); + +// Start the server +const PORT = process.env.GRPC_PORT || '50051'; +server.bindAsync(`0.0.0.0:${PORT}`, grpc.ServerCredentials.createInsecure(), async (error) => { + if (error) { + console.error('Failed to bind server:', error); + return; + } + console.log(`gRPC server running at http://0.0.0.0:${PORT}`); + await initChaincodeConnection(); //Connects to the chaincode and + server; +}); + +// Handle shutdown gracefully +process.on('SIGINT', async () => { + console.log('Shutting down...'); + await closeConnection(); + server.forceShutdown(); +}); \ No newline at end of file diff --git a/src/dlt/gateway/dltApp/src/fabricConnect.ts b/src/dlt/gateway/dltApp/src/fabricConnect.ts new file mode 100644 index 0000000000000000000000000000000000000000..973fd6077cd3e4ee7d397f8625f4a399935183f9 --- /dev/null +++ b/src/dlt/gateway/dltApp/src/fabricConnect.ts @@ -0,0 +1,209 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
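+// fabricConnect builds the Fabric Gateway connection used by dltGateway.js:
+// it reads the channel, chaincode, MSP id and credential paths from the .env
+// file, opens a TLS gRPC connection to the peer, and returns the smart
+// contract handle, a chaincode-event iterable, and a close() callback that
+// releases the event stream, the gateway and the gRPC client.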
+ +import * as grpc from '@grpc/grpc-js'; +import { connect, Contract, Identity, Signer, signers, Network, CloseableAsyncIterable, ChaincodeEvent, GatewayError } from '@hyperledger/fabric-gateway'; +import * as crypto from 'crypto'; +import { promises as fs } from 'fs'; +import * as path from 'path'; +import { TextDecoder } from 'util'; +import * as dotenv from 'dotenv'; + +dotenv.config({ path: path.resolve(__dirname, '..', '.env') }); +const channelName = getEnvVar('CHANNEL_NAME'); +const chaincodeName = getEnvVar('CHAINCODE_NAME'); +const mspId = getEnvVar('MSP_ID'); + + +// Path to user private key directory. +const keyDirectoryPath = getEnvVar('KEY_DIRECTORY_PATH'); + +// Path to user certificate directory. +const certDirectoryPath = getEnvVar('CERT_DIRECTORY_PATH'); + +// Path to peer tls certificate. +const tlsCertPath = getEnvVar('TLS_CERT_PATH'); + +// Gateway peer endpoint. +const peerEndpoint = getEnvVar('PEER_ENDPOINT'); + +// Gateway peer SSL host name override. +const peerHostAlias = getEnvVar('PEER_HOST_ALIAS'); + +const utf8Decoder = new TextDecoder(); +const assetId = `asset${Date.now()}`; + +export async function connectToNetwork(): Promise<{ contract: Contract, events: CloseableAsyncIterable<ChaincodeEvent>, close: () => Promise<void> }> { + + await displayInputParameters(); + + // The gRPC client connection should be shared by all Gateway connections to this endpoint. + + const client = await newGrpcConnection(); + + const gateway = connect({ + client, + identity: await newIdentity(), + signer: await newSigner(), + // Default timeouts for different gRPC calls + evaluateOptions: () => { + return { deadline: Date.now() + 5000 }; // 5 seconds + }, + endorseOptions: () => { + return { deadline: Date.now() + 15000 }; // 15 seconds + }, + submitOptions: () => { + return { deadline: Date.now() + 5000 }; // 5 seconds + }, + commitStatusOptions: () => { + return { deadline: Date.now() + 60000 }; // 1 minute + }, + }); + + let events: CloseableAsyncIterable<ChaincodeEvent> | undefined; + + + // Get a network instance representing the channel where the smart contract is deployed. + const network = gateway.getNetwork(channelName); + + // Get the smart contract from the network. + const contract = network.getContract(chaincodeName); + + //Listen for events emitted by transactions + //events = await startEventListening(network); + events = await network.getChaincodeEvents(chaincodeName); + + // Initialize the ledger. 
+ await initLedger(contract); + + console.log(Date.now()) + + return { + contract: contract, + events: events, + close: async function () { + if (events) events.close(); + gateway.close(); + client.close(); + } + }; + + +} + +async function newGrpcConnection(): Promise<grpc.Client> { + const tlsRootCert = await fs.readFile(tlsCertPath); + const tlsCredentials = grpc.credentials.createSsl(tlsRootCert); + return new grpc.Client(peerEndpoint, tlsCredentials, { + 'grpc.ssl_target_name_override': peerHostAlias, + }); +} + +async function newIdentity(): Promise<Identity> { + //const certPath = await getFirstDirFileName(certDirectoryPath); + //console.log("DEBUG", certDirectoryPath); + const credentials = await fs.readFile(certDirectoryPath); + return { mspId, credentials }; +} + + +async function newSigner(): Promise<Signer> { + //const keyPath = await getFirstDirFileName(keyDirectoryPath); + //console.log("DEBUG2", keyDirectoryPath); + const privateKeyPem = await fs.readFile(keyDirectoryPath); + const privateKey = crypto.createPrivateKey(privateKeyPem); + return signers.newPrivateKeySigner(privateKey); +} + +/** + * This type of transaction would typically only be run once by an application the first time it was started after its + * initial deployment. A new version of the chaincode deployed later would likely not need to run an "init" function. + */ +async function initLedger(contract: Contract): Promise<void> { + try { + console.log('\n--> Submit Transaction: InitLedger, function activates the chaincode'); + + await contract.submitTransaction('InitLedger'); + + console.log('*** Transaction committed successfully'); + } catch (error) { + console.error('Failed to submit InitLedger transaction:', error); + throw error; + } +} + + + +/** + * getEnvVar() will return the value of an environment variable. + */ +function getEnvVar(varName: string): string { + const value = process.env[varName]; + if (!value) { + throw new Error(`Environment variable ${varName} is not set`); + } + return value; +} + +/** + * displayInputParameters() will print the global scope parameters used by the main driver routine. + */ +async function displayInputParameters(): Promise<void> { + console.log(`channelName: ${channelName}`); + console.log(`chaincodeName: ${chaincodeName}`); + console.log(`mspId: ${mspId}`); + console.log(`keyDirectoryPath: ${keyDirectoryPath}`); + console.log(`certDirectoryPath: ${certDirectoryPath}`); + console.log(`tlsCertPath: ${tlsCertPath}`); + console.log(`peerEndpoint: ${peerEndpoint}`); + console.log(`peerHostAlias: ${peerHostAlias}`); +} + +/** + * startEventListening() will initiate the event listener for chaincode events. + */ +async function startEventListening(network: Network): Promise<CloseableAsyncIterable<ChaincodeEvent>> { + console.log('\n*** Start chaincode event listening'); + + const events = await network.getChaincodeEvents(chaincodeName); + + void readEvents(events); // Don't await - run asynchronously + return events; +} + +/** + * readEvents() format and display the events as a JSON. 
+ */ +async function readEvents(events: CloseableAsyncIterable<ChaincodeEvent>): Promise<void> { + try { + for await (const event of events) { + const payload = parseJson(event.payload); + console.log(`\n<-- Chaincode event received: ${event.eventName} -`, payload); + } + } catch (error: unknown) { + // Ignore the read error when events.close() is called explicitly + if (!(error instanceof GatewayError) || error.code !== grpc.status.CANCELLED.valueOf()) { + throw error; + } + } +} + +/** + * parseJson() formats a JSON. + */ +function parseJson(jsonBytes: Uint8Array): unknown { + const json = utf8Decoder.decode(jsonBytes); + return JSON.parse(json); +} + diff --git a/src/dlt/gateway/dltApp/tsconfig.json b/src/dlt/gateway/dltApp/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..34eddef69221adeca4861399d574047a8c9ae6f1 --- /dev/null +++ b/src/dlt/gateway/dltApp/tsconfig.json @@ -0,0 +1,17 @@ +{ + "extends":"@tsconfig/node18/tsconfig.json", + "compilerOptions": { + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "outDir": "dist", + "declaration": true, + "sourceMap": true, + "noImplicitAny": true + }, + "include": [ + "./src/**/*" ], + "exclude": [ + "./src/**/*.spec.ts" + ] + } + \ No newline at end of file diff --git a/src/dlt/gateway/keys/.gitignore b/src/dlt/gateway/keys/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b54a37fc5e1fa51616f1de2ef20651cb28504040 --- /dev/null +++ b/src/dlt/gateway/keys/.gitignore @@ -0,0 +1,3 @@ +ca.crt +cert.pem +priv_sk diff --git a/src/dlt/gateway/keys/place_hls_keys_in_this_folder b/src/dlt/gateway/keys/place_hls_keys_in_this_folder new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/dlt/gateway/samples/sampleTopo.json b/src/dlt/gateway/samples/sampleTopo.json new file mode 100644 index 0000000000000000000000000000000000000000..67115d4be322ed8567688c51a68a95c2b0800138 --- /dev/null +++ b/src/dlt/gateway/samples/sampleTopo.json @@ -0,0 +1,30 @@ +{ + "name": "Network A", + "nodes": [ + { + "id": "node1", + "type": "switch", + "status": "active", + "connections": [ + "node2", + "node3" + ] + }, + { + "id": "node2", + "type": "router", + "status": "inactive", + "connections": [ + "node1" + ] + }, + { + "id": "node3", + "type": "server", + "status": "active", + "connections": [ + "node1" + ] + } + ] +} diff --git a/src/dlt/gateway/samples/topo1.json b/src/dlt/gateway/samples/topo1.json new file mode 100644 index 0000000000000000000000000000000000000000..e4a49981f99339d77538af814365703e463d3db5 --- /dev/null +++ b/src/dlt/gateway/samples/topo1.json @@ -0,0 +1,96 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "eth1", "type": "copper"}, {"uuid": "eth2", "type": "copper"}, {"uuid": "int", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": 
"emu-datacenter", "device_drivers": [0], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "eth1", "type": "copper"}, {"uuid": "eth2", "type": "copper"}, {"uuid": "int", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "SRL1"}}, "device_type": "packet-router", "device_drivers": [8], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.100.100.101"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "57400"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "NokiaSrl1!", "use_tls": true + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "SRL2"}}, "device_type": "packet-router", "device_drivers": [8], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.100.100.102"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "57400"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "NokiaSrl1!", "use_tls": true + }}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "DC1/eth1==SRL1/ethernet-1/2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "SRL1/ethernet-1/2==DC1/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}}, + {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "SRL1/ethernet-1/1==SRL2/ethernet-1/1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "SRL2/ethernet-1/1==SRL1/ethernet-1/1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "DC2/eth1==SRL2/ethernet-1/2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "SRL2/ethernet-1/2==DC2/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}}, + {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + } + ] +} diff --git a/src/dlt/gateway/samples/topo2.json b/src/dlt/gateway/samples/topo2.json new file mode 100644 index 0000000000000000000000000000000000000000..6885c7d9082bfcb17447252636ea4efb0f500283 --- /dev/null +++ b/src/dlt/gateway/samples/topo2.json @@ -0,0 +1,210 @@ +{ + 
"contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": 
"copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R6"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + 
{"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R7"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/6"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "R1==R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1==R6"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R4==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}} 
+ ]}, + {"link_id": {"link_uuid": {"uuid": "R4==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R6"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R6==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R6==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]} + ] +} diff --git a/src/dlt/gateway/samples/topo3.json b/src/dlt/gateway/samples/topo3.json new file mode 100644 index 0000000000000000000000000000000000000000..f36fbd7d03a93db61a7233e084f60e7680a54606 --- /dev/null +++ b/src/dlt/gateway/samples/topo3.json @@ -0,0 +1,200 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "name": "admin"} + ], + "topologies": [ + {"topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}, "name": "admin"} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "be1.be", "type": "copper"}, {"uuid": "pt1.pt", "type": "copper"}, {"uuid": "uk1.uk", "type": "copper"}, + {"uuid": "es1.es", "type": "copper"}, {"uuid": "it1.it", "type": "copper"} + ]}}} + ]}}, + {"device_id": 
{"device_uuid": {"uuid": "be1.be"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "de1.de", "type": "copper"}, {"uuid": "gr1.gr", "type": "copper"}, {"uuid": "uk1.uk", "type": "copper"}, + {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "it1.it", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "de1.de", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}, + {"uuid": "pt1.pt", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "uk1.uk", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}, {"uuid": "gr1.gr", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "uk1.uk", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "es1.es", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "it1.it", "type": 
"copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "pt1.pt", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "es1.es", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}, + {"uuid": "gr1.gr", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "it1.it", "type": "copper"}, {"uuid": "de1.de", "type": "copper"}, {"uuid": "be1.be", "type": "copper"} + ]}}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "fr1.fr_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 4.804849}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_fr1.fr"}}, "attributes": {"total_capacity_gbps": 300, "used_capacity_gbps": 55.182499}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_de1.de"}}, "attributes": {"total_capacity_gbps": 600, "used_capacity_gbps": 199.272255}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "de1.de"}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 14.334868}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "pt1.pt_uk1.uk"}}, "attributes": {"total_capacity_gbps": 400, "used_capacity_gbps": 51.415678}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "pt1.pt"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "pt1.pt_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 3.733925}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": 
{"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "pt1.pt"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "pt1.pt_es1.es"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 13.32428}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "es1.es"}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "pt1.pt"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_gr1.gr"}}, "attributes": {"total_capacity_gbps": 800, "used_capacity_gbps": 1.593313}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_fr1.fr"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 98.574706}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_es1.es"}}, "attributes": {"total_capacity_gbps": 300, "used_capacity_gbps": 18.97108}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "es1.es"}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 10.327772}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "gr1.gr_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 7.983659}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "gr1.gr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "gr1.gr_de1.de"}}, "attributes": {"total_capacity_gbps": 5000, "used_capacity_gbps": 4930.897339}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "de1.de"}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "gr1.gr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "gr1.gr_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.895539}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "gr1.gr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "fr1.fr_uk1.uk"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 28.144199}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "fr1.fr_pt1.pt"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 1.916587}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": 
"fr1.fr_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 3.330747}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "fr1.fr_es1.es"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 96.682749}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "es1.es"}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "es1.es_pt1.pt"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 5.643483}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "es1.es"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "es1.es_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 15.353667}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "es1.es"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "es1.es_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 20.517778}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "es1.es"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "de1.de_uk1.uk"}}, "attributes": {"total_capacity_gbps": 600, "used_capacity_gbps": 239.446965}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "de1.de"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "de1.de_gr1.gr"}}, "attributes": {"total_capacity_gbps": 2100, "used_capacity_gbps": 110.602237}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "de1.de"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "de1.de_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 57.709307}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "de1.de"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_pt1.pt"}}, "attributes": {"total_capacity_gbps": 800, "used_capacity_gbps": 652.70225}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_uk1.uk"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 8.252107}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.357069}, "link_endpoint_ids": [ + 
{"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_de1.de"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 20.400142}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "de1.de"}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 31.346514}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_gr1.gr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.026822}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]} + ] +} diff --git a/src/dlt/gateway/samples/topo4.json b/src/dlt/gateway/samples/topo4.json new file mode 100644 index 0000000000000000000000000000000000000000..85bbad55eb8608d2d2a7abad7fffab8fffdae682 --- /dev/null +++ b/src/dlt/gateway/samples/topo4.json @@ -0,0 +1,150 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "T1"}}, "device_type": "optical-transponder", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "T1"}}, "endpoint_uuid": {"uuid": "1"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.101"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false, + "look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [{"uuid": "1", "type": "optical", "sample_types": [101, 102, 201, 202]}] + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "T2"}}, "device_type": "optical-transponder", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "T2"}}, "endpoint_uuid": {"uuid": "6"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.102"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false, + 
"look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [{"uuid": "6", "type": "optical", "sample_types": [101, 102, 201, 202]}] + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "optical-roadm", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "3"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "12"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "13"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.201"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false, + "look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "2"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "3"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "12"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "13"} + ]} + }}] + } + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "optical-roadm", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "4"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "5"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "14"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "15"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.202"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, 
"hostkey_verify": false, + "look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "4"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "5"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "14"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "15"} + ] + }}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "T1->R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "12"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1->T1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "T1"}}, "endpoint_uuid": {"uuid": "1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1->R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "14"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2->R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "13"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T2->R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "15"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2->T2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "T2"}}, "endpoint_uuid": {"uuid": "6"}} + ]} + ] +} diff --git a/src/dlt/gateway/samples/updatedTopo.json b/src/dlt/gateway/samples/updatedTopo.json new file mode 100644 index 0000000000000000000000000000000000000000..74b084b9d9c48de89ddf6122f46316a74c1a4fca --- /dev/null +++ b/src/dlt/gateway/samples/updatedTopo.json @@ -0,0 +1,17 @@ +{ + "name": "Updated Network", + "nodes": [ + { + "id": "node1", + "type": "switch", + "status": "active", + "connections": ["node2"] + }, + { + "id": "node2", + "type": "router", + "status": "inactive", + "connections": ["node1"] + } + ] +} diff --git a/src/dlt/gateway/tests/perfTest.js b/src/dlt/gateway/tests/perfTest.js new file mode 100644 index 0000000000000000000000000000000000000000..fd60bd9c4bba04a7c2a048d26389dd66381b5749 --- /dev/null +++ b/src/dlt/gateway/tests/perfTest.js @@ -0,0 +1,119 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+const { connectToNetwork } = require('../dltApp/dist/fabricConnect');
+const fsp = require('fs').promises;
+const fs = require('fs');
+const util = require('util');
+
+const utf8Decoder = new TextDecoder();
+const topoDirectory = '../samples/';
+const topologies = ['topo1.json', 'topo2.json', 'topo3.json', 'topo4.json'];
+//const topologies = ['topo4.json'];
+
+const iterations = 1000;
+
+async function main() {
+  try {
+    const { contract, close } = await connectToNetwork();
+    for (const topoFile of topologies) {
+      const logFilePath = `./operation_times_${topoFile.split('.')[0]}.txt`; // Creates a separate log file for each topology
+      const appendFile = util.promisify(fs.appendFile.bind(fs, logFilePath));
+
+      console.log(`Starting tests for ${topoFile}`);
+      for (let i = 0; i < iterations; i++) {
+        console.log(`Iteration ${i + 1} for ${topoFile}`);
+        await runBlockchainOperations(contract, topoFile, appendFile);
+      }
+    }
+    await close(); // Clean up the connection
+  } catch (error) {
+    console.error('An error occurred:', error);
+  }
+}
+
+async function runBlockchainOperations(contract, topoFile, appendFile) {
+  const assetId = `asset${Date.now()}`;
+  const jsonData = await readJsonData(`${topoDirectory}${topoFile}`);
+
+  // Define operations
+  const operations = [
+    { type: 'STORE', jsonData },
+    { type: 'UPDATE', jsonData },
+    { type: 'FETCH', jsonData: null },
+    { type: 'DELETE', jsonData: null },
+    { type: 'FETCH_ALL', jsonData: null }
+  ];
+
+  for (let op of operations) {
+    await executeOperation(contract, op.type, assetId, op.jsonData, appendFile);
+  }
+}
+
+async function readJsonData(filePath) {
+  try {
+    return await fsp.readFile(filePath, 'utf8');
+  } catch (error) {
+    console.error(`Failed to read file: ${filePath}`, error);
+    return '{}';
+  }
+}
+
+async function executeOperation(contract, operationType, assetId, jsonData, appendFile) {
+  const startTime = process.hrtime.bigint();
+  try {
+    let result;
+    switch (operationType) {
+      case 'STORE':
+        result = await contract.submitTransaction('StoreTopoData', assetId, jsonData);
+        break;
+      case 'UPDATE':
+        result = await contract.submitTransaction('UpdateTopoData', assetId, jsonData);
+        break;
+      case 'FETCH':
+        result = await contract.evaluateTransaction('RetrieveTopoData', assetId);
+        break;
+      case 'DELETE':
+        result = await contract.submitTransaction('DeleteTopo', assetId);
+        break;
+      case 'FETCH_ALL':
+        result = await contract.evaluateTransaction('GetAllInfo');
+        break;
+    }
+    result = utf8Decoder.decode(result);
+    const operationTime = recordOperationTime(startTime);
+    await logOperationTime(operationTime, operationType, appendFile);
+    console.log(`${operationType} Result:`, result);
+  } catch (error) {
+    console.error(`Error during ${operationType}:`, error);
+  }
+}
+
+function recordOperationTime(startTime) {
+  const endTime = process.hrtime.bigint();
+  const operationTime = Number(endTime - startTime) / 1e6;
+  return operationTime;
+}
+
+async function logOperationTime(operationTime, operationType, appendFile) {
+  const timestamp = Date.now();
+  const logEntry = `${timestamp} - ${operationType} - Execution time: ${operationTime.toFixed(3)} ms\n`;
+  try {
+    await appendFile(logEntry);
+  } catch (error) {
+    console.error('Error writing to log file:', error);
+  }
+}
+
+main();
diff --git a/src/dlt/gateway/tests/rateTest.js b/src/dlt/gateway/tests/rateTest.js
new file mode 100644
index 0000000000000000000000000000000000000000..f7c40884293220c0ab235322d74aee61074a281c
--- /dev/null
+++ b/src/dlt/gateway/tests/rateTest.js
@@ -0,0
+1,85 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { connectToNetwork } = require('../dltApp/dist/fabricConnect'); +const fs = require('fs'); +const util = require('util'); +const appendFile = util.promisify(fs.appendFile); +const logFilePath = './transaction_times_TPS_TOPO3.txt'; +const utf8Decoder = new TextDecoder(); +const topoDirectory = '../samples/'; + +async function main() { + const { contract, close } = await connectToNetwork(); + try { + const rates = [10, 50, 250, 500]; // Transactions per second + for (let i = 0; i < 1000; i++) { + for (let rate of rates) { + console.log(`Testing at ${rate} TPS`); + await performLoadTest(contract, 1000, rate); + } + } + } finally { + await close(); // Ensure to close the network connection + } +} + +async function performLoadTest(contract, totalTransactions, rate) { + const interval = 1000 / rate; // Calculate interval in milliseconds + let promises = []; + const startTime = Date.now(); + + for (let i = 0; i < totalTransactions; i++) { + // Queue a transaction promise + promises.push(sendTransaction(contract, `asset${Date.now() + i}`)); + + // Process in batches according to the rate + if ((i + 1) % rate === 0 || i === totalTransactions - 1) { + await Promise.all(promises); // Send a batch of transactions + promises = []; // Reset for the next batch + if (i < totalTransactions - 1) { + await new Promise(resolve => setTimeout(resolve, interval)); // Throttle the transaction sending + } + } + } + + const endTime = Date.now(); + const totalTime = endTime - startTime; + const actualRate = totalTransactions / (totalTime / 1000); + console.log(`Total time for ${totalTransactions} transactions at target ${rate} TPS: ${totalTime} ms`); + console.log(`Actual rate achieved: ${actualRate.toFixed(2)} TPS`); + await appendFile(logFilePath, `Target Rate: ${rate} TPS, Total Time: ${totalTime} ms, Actual Rate: ${actualRate.toFixed(2)} TPS\n`); +} + +async function sendTransaction(contract, assetId) { + try { + const jsonData = await readJsonData(`${topoDirectory}topo3.json`); + const result = await contract.submitTransaction('StoreTopoData', assetId, jsonData); + return utf8Decoder.decode(result); + } catch (error) { + console.error('Transaction failed:', error); + return null; + } +} + +async function readJsonData(filePath) { + try { + return await fs.promises.readFile(filePath, 'utf8'); + } catch (error) { + console.error(`Failed to read file: ${filePath}`, error); + return '{}'; + } +} + +main().catch(console.error); diff --git a/src/dlt/gateway/tests/simpleTest.js b/src/dlt/gateway/tests/simpleTest.js new file mode 100644 index 0000000000000000000000000000000000000000..72da03dec16b91c7b6acc21e339f2844d74153be --- /dev/null +++ b/src/dlt/gateway/tests/simpleTest.js @@ -0,0 +1,74 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const { connectToNetwork } = require('../dltApp/dist/fabricConnect');
+const fs = require('fs');
+const util = require('util');
+const appendFile = util.promisify(fs.appendFile);
+const logFilePath = './transaction_times_TPS2.txt';
+const utf8Decoder = new TextDecoder();
+const topoDirectory = '../samples/';
+
+async function main() {
+  const { contract, close } = await connectToNetwork();
+  try {
+    console.log('Testing with 500 consecutive transactions');
+    await performLoadTest(contract);
+  } finally {
+    await close(); // Ensure to close the network connection
+  }
+}
+
+async function performLoadTest(contract) {
+  const totalTransactions = 500;
+  const promises = [];
+  const startTime = Date.now();
+
+  for (let i = 0; i < totalTransactions; i++) {
+    // Queue a transaction promise
+    promises.push(sendTransaction(contract, `asset${startTime}_${i}`));
+  }
+
+  await Promise.all(promises); // Send all transactions
+
+  const endTime = Date.now();
+  const totalTime = endTime - startTime;
+  const actualRate = totalTransactions / (totalTime / 1000);
+  console.log(`Total time for ${totalTransactions} transactions: ${totalTime} ms`);
+  console.log(`Actual rate achieved: ${actualRate.toFixed(2)} TPS`);
+  await appendFile(logFilePath, `Total Transactions: ${totalTransactions}, Total Time: ${totalTime} ms, Actual Rate: ${actualRate.toFixed(2)} TPS\n`);
+}
+
+async function sendTransaction(contract, assetId) {
+  try {
+    const jsonData = await readJsonData(`${topoDirectory}topo4.json`);
+    //const jsonData = JSON.stringify({ data: `Data for ${assetId}`});
+    const result = await contract.submitTransaction('StoreTopoData', assetId, jsonData);
+    return utf8Decoder.decode(result);
+  } catch (error) {
+    console.error('Transaction failed:', error);
+    return null;
+  }
+}
+
+async function readJsonData(filePath) {
+  try {
+    return await fs.promises.readFile(filePath, 'utf8');
+  } catch (error) {
+    console.error(`Failed to read file: ${filePath}`, error);
+    return '{}';
+  }
+}
+
+main().catch(console.error);
diff --git a/src/dlt/gateway/tests/testEvents.js b/src/dlt/gateway/tests/testEvents.js
new file mode 100644
index 0000000000000000000000000000000000000000..6e18e803202232a9613264dd49e6beee14546d14
--- /dev/null
+++ b/src/dlt/gateway/tests/testEvents.js
@@ -0,0 +1,66 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+const grpc = require('@grpc/grpc-js');
+const protoLoader = require('@grpc/proto-loader');
+const path = require('path');
+
+const PROTO_PATH = path.resolve(__dirname, '../../../../proto/dlt_gateway.proto');
+const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
+  keepCase: true,
+  longs: String,
+  enums: String,
+  defaults: true,
+  oneofs: true,
+});
+const dltProto = grpc.loadPackageDefinition(packageDefinition).dlt;
+
+const client = new dltProto.DltGatewayService(
+  '127.0.0.1:32001', // Replace with TFS server IP_ADDRESS
+  grpc.credentials.createInsecure()
+);
+
+function subscribeToDlt() {
+  const request = {
+    // Define any necessary subscription filters here if applicable
+  };
+
+  const call = client.SubscribeToDlt(request);
+
+  call.on('data', (event) => {
+    console.log('Received event:', event);
+  });
+
+  call.on('error', (error) => {
+    console.error('Error:', error.message);
+  });
+
+  call.on('end', () => {
+    console.log('Stream ended.');
+  });
+
+  // Optionally, you can cancel the subscription after a certain time or condition
+  setTimeout(() => {
+    console.log('Cancelling subscription...');
+    call.cancel();
+  }, 600000); // Cancel after 10 minutes for demonstration purposes
+}
+
+function runTests() {
+  console.log("Testing subscription to DLT events");
+  subscribeToDlt();
+}
+
+runTests();
diff --git a/src/dlt/gateway/tests/testGateway.js b/src/dlt/gateway/tests/testGateway.js
new file mode 100644
index 0000000000000000000000000000000000000000..b08f648daf6f257f44ec94cd13dabccce67cd803
--- /dev/null
+++ b/src/dlt/gateway/tests/testGateway.js
@@ -0,0 +1,126 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const path = require('path'); +const fs = require('fs').promises; +const { v4: uuidv4 } = require('uuid'); // Import the UUID library + + +const PROTO_PATH = path.resolve(__dirname, '../../../../proto/dlt_gateway.proto'); +const packageDefinition = protoLoader.loadSync(PROTO_PATH, { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, +}); +const dltProto = grpc.loadPackageDefinition(packageDefinition).dlt; + +const client = new dltProto.DltGatewayService( + '127.0.0.1:32001', // Replace with TFS server IP_ADDRESS + grpc.credentials.createInsecure() +); + +const assetId = `asset-${Date.now()}`; +const domainUuid = `domain-${uuidv4()}`; // Generate a pretty domain UUID + +async function getTopoData(filename) { + try { + const data = await fs.readFile(`../samples/${filename}`, 'utf8'); + return data; + } catch (error) { + console.error('Failed to read file:', filename, error); + return '{}'; // Empty JSON if error + } +} + +async function processTopoData(operation, assetId, jsonFilename) { + let jsonData = '{}'; + if (jsonFilename) { + jsonData = await getTopoData(jsonFilename); + } + + const request = { + record_id: { + domain_uuid: { uuid: domainUuid }, // Replace "domain-uuid" with actual domain UUID if needed + type: 'DLTRECORDTYPE_TOPOLOGY', // Use the appropriate type if needed + record_uuid: { uuid: assetId } + }, + operation, + data_json: jsonData + }; + + return new Promise((resolve, reject) => { + client.RecordToDlt(request, (error, response) => { + if (error) { + console.error('Error:', error.message); + reject(error); + } else { + console.log('Response:', response); + resolve(response); + } + }); + }); +} + +async function getDLTData(assetId) { + + const request = { + domain_uuid: { uuid: domainUuid }, // Replace "domain-uuid" with actual domain UUID if needed + type: 'DLTRECORDTYPE_TOPOLOGY', // Use the appropriate type if needed + record_uuid: { uuid: assetId } + }; + + return new Promise((resolve, reject) => { + client.GetFromDlt(request, (error, response) => { + if (error) { + console.error('Error:', error.message); + reject(error); + } else { + console.log('Response:', response); + resolve(response); + } + }); + }); +} + +async function runTests() { + console.log("Testing Store Operation"); + await processTopoData('DLTRECORDOPERATION_ADD', assetId, 'topo2.json'); + + console.log("Testing Update Operation"); + await processTopoData('DLTRECORDOPERATION_UPDATE', assetId, 'topo3.json'); + + console.log("Testing Fetch Operation"); + await getDLTData(assetId); + + + console.log("Testing Delete Operation"); + await processTopoData('DLTRECORDOPERATION_DELETE', assetId); + + console.log("Testing Fetch All Operation"); + // This part assumes you have a GetAllInfo method implemented in your chaincode and corresponding gRPC service. 
+ // client.GetAllInfo({}, (error, response) => { + // if (error) { + // console.error('Error:', error.message); + // } else { + // console.log('All Data:', response); + // } + // }); +} + +runTests().catch(console.error); diff --git a/src/interdomain/Config.py b/src/interdomain/Config.py index 918f60d79d51ee8f9fe3875b630de2c373c01a5a..d9098447bb107a878acf297d66eaf1573d72691d 100644 --- a/src/interdomain/Config.py +++ b/src/interdomain/Config.py @@ -15,6 +15,7 @@ from common.Settings import get_setting SETTING_NAME_TOPOLOGY_ABSTRACTOR = 'TOPOLOGY_ABSTRACTOR' +SETTING_NAME_DLT_INTEGRATION = 'DLT_INTEGRATION' TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'} def is_topology_abstractor_enabled() -> bool: @@ -22,3 +23,9 @@ def is_topology_abstractor_enabled() -> bool: if is_enabled is None: return False str_is_enabled = str(is_enabled).upper() return str_is_enabled in TRUE_VALUES + +def is_dlt_enabled() -> bool: + is_enabled = get_setting(SETTING_NAME_DLT_INTEGRATION, default=None) + if is_enabled is None: return False + str_is_enabled = str(is_enabled).upper() + return str_is_enabled in TRUE_VALUES diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py index 54878be2ed70fa834692b85ce27337d61160948b..bce5e69203417e9026ddabc336a260216d0fd991 100644 --- a/src/interdomain/service/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/InterdomainServiceServicerImpl.py @@ -34,7 +34,7 @@ from common.tools.object_factory.Device import json_device_id from common.tools.object_factory.EndPoint import json_endpoint_id from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient -from dlt.connector.client.DltConnectorClient import DltConnectorClient +from dlt.connector.client.DltConnectorClientAsync import DltConnectorClientAsync from pathcomp.frontend.client.PathCompClient import PathCompClient from service.client.ServiceClient import ServiceClient from slice.client.SliceClient import SliceClient @@ -88,7 +88,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): ]) if len(env_vars) == 2: # DLT available - dlt_connector_client = DltConnectorClient() + dlt_connector_client = DltConnectorClientAsync() dlt_connector_client.connect() else: dlt_connector_client = None diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py index c0497bd2902080f8b967ba50ce370a4c9b711689..7ab9682c2789ed11c4f066fa4b453e8c1212878b 100644 --- a/src/interdomain/service/__main__.py +++ b/src/interdomain/service/__main__.py @@ -18,8 +18,9 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables) -from interdomain.Config import is_topology_abstractor_enabled +from interdomain.Config import is_dlt_enabled, is_topology_abstractor_enabled from .topology_abstractor.TopologyAbstractor import TopologyAbstractor +from .topology_abstractor.DltRecorder import DLTRecorder from .InterdomainService import InterdomainService from .RemoteDomainClients import RemoteDomainClients @@ -69,12 +70,21 @@ def main(): topology_abstractor = TopologyAbstractor() topology_abstractor.start() + # Subscribe to Context Events + dlt_enabled = is_dlt_enabled() + if dlt_enabled: + LOGGER.info('Starting DLT functionality...') + dlt_recorder = DLTRecorder() + dlt_recorder.start() + # Wait for Ctrl+C or 
termination signal while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') if topology_abstractor_enabled: topology_abstractor.stop() + if dlt_enabled: + dlt_recorder.stop() grpc_service.stop() remote_domain_clients.stop() diff --git a/src/interdomain/service/topology_abstractor/DltRecordSender.py b/src/interdomain/service/topology_abstractor/DltRecordSender.py index a504fe01b4e5755b880b76114c60d28f186f3f6f..5c53edc598ee57b856ebea3bd1eaf2e855b0cf6b 100644 --- a/src/interdomain/service/topology_abstractor/DltRecordSender.py +++ b/src/interdomain/service/topology_abstractor/DltRecordSender.py @@ -12,24 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging -from typing import Dict, List, Optional, Tuple +import asyncio, logging +from typing import Dict, List, Tuple from common.proto.context_pb2 import Device, Link, Service, Slice, TopologyId from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from context.client.ContextClient import ContextClient -from dlt.connector.client.DltConnectorClient import DltConnectorClient -from .Types import DltRecordTypes +from dlt.connector.client.DltConnectorClientAsync import DltConnectorClientAsync LOGGER = logging.getLogger(__name__) class DltRecordSender: - def __init__(self, context_client : ContextClient, dlt_connector_client : Optional[DltConnectorClient]) -> None: + def __init__(self, context_client : ContextClient) -> None: self.context_client = context_client - self.dlt_connector_client = dlt_connector_client + LOGGER.debug('Creating Servicer...') + self.dlt_connector_client = DltConnectorClientAsync() + LOGGER.debug('Servicer Created') self.dlt_record_uuids : List[str] = list() - self.dlt_record_uuid_to_data : Dict[str, Tuple[TopologyId, DltRecordTypes]] = dict() + self.dlt_record_uuid_to_data : Dict[str, Tuple[TopologyId, object]] = dict() - def _add_record(self, record_uuid : str, data : Tuple[TopologyId, DltRecordTypes]) -> None: + async def initialize(self): + await self.dlt_connector_client.connect() + + def _add_record(self, record_uuid : str, data : Tuple[TopologyId, object]) -> None: if record_uuid in self.dlt_record_uuid_to_data: return self.dlt_record_uuid_to_data[record_uuid] = data self.dlt_record_uuids.append(record_uuid) @@ -60,36 +64,45 @@ class DltRecordSender: record_uuid = '{:s}:slice:{:s}/{:s}'.format(topology_uuid, context_uuid, slice_uuid) self._add_record(record_uuid, (topology_id, slice_)) - def commit(self) -> None: + async def commit(self) -> None: + if not self.dlt_connector_client: + LOGGER.error('DLT Connector Client is None, cannot commit records.') + return + + tasks = [] # List to hold all the async tasks + for dlt_record_uuid in self.dlt_record_uuids: - topology_id,dlt_record = self.dlt_record_uuid_to_data[dlt_record_uuid] + topology_id, dlt_record = self.dlt_record_uuid_to_data[dlt_record_uuid] if isinstance(dlt_record, Device): - device_id = self.context_client.SetDevice(dlt_record) + device_id = dlt_record.device_id if self.dlt_connector_client is None: continue dlt_device_id = DltDeviceId() dlt_device_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member dlt_device_id.device_id.CopyFrom(device_id) # pylint: disable=no-member - self.dlt_connector_client.RecordDevice(dlt_device_id) + tasks.append(self.dlt_connector_client.RecordDevice(dlt_device_id)) elif isinstance(dlt_record, Link): - link_id = self.context_client.SetLink(dlt_record) + link_id = dlt_record.link_id if 
self.dlt_connector_client is None: continue dlt_link_id = DltLinkId() dlt_link_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member dlt_link_id.link_id.CopyFrom(link_id) # pylint: disable=no-member - self.dlt_connector_client.RecordLink(dlt_link_id) + tasks.append(self.dlt_connector_client.RecordLink(dlt_link_id)) elif isinstance(dlt_record, Service): - service_id = self.context_client.SetService(dlt_record) + service_id = dlt_record.service_id if self.dlt_connector_client is None: continue dlt_service_id = DltServiceId() dlt_service_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member dlt_service_id.service_id.CopyFrom(service_id) # pylint: disable=no-member - self.dlt_connector_client.RecordService(dlt_service_id) + tasks.append(self.dlt_connector_client.RecordService(dlt_service_id)) elif isinstance(dlt_record, Slice): - slice_id = self.context_client.SetSlice(dlt_record) + slice_id = dlt_record.slice_id if self.dlt_connector_client is None: continue dlt_slice_id = DltSliceId() dlt_slice_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member dlt_slice_id.slice_id.CopyFrom(slice_id) # pylint: disable=no-member - self.dlt_connector_client.RecordSlice(dlt_slice_id) + tasks.append(self.dlt_connector_client.RecordSlice(dlt_slice_id)) else: - LOGGER.error('Unsupported Record({:s})'.format(str(dlt_record))) + LOGGER.error(f'Unsupported Record({str(dlt_record)})') + + if tasks: + await asyncio.gather(*tasks) # Run all the tasks concurrently diff --git a/src/interdomain/service/topology_abstractor/DltRecorder.py b/src/interdomain/service/topology_abstractor/DltRecorder.py new file mode 100644 index 0000000000000000000000000000000000000000..22c436363b009810815b1cf3fa011fd5cbbc6a13 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/DltRecorder.py @@ -0,0 +1,206 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
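The `DltRecordSender.commit()` refactor shown just above queues one gRPC coroutine per record (`RecordDevice`, `RecordLink`, `RecordService`, `RecordSlice`) and then awaits the whole batch at once; the new `DLTRecorder`, whose diff starts here, drives that pattern from its own asyncio loop. As a reference, the following is a minimal, self-contained sketch of the same fan-out idiom; `record_device` and the sample IDs are illustrative placeholders, not the actual DLT connector API:

```python
# Minimal sketch (illustrative only) of the "collect coroutines, then gather" pattern
# used by DltRecordSender.commit(); record_device() stands in for an async RPC.
import asyncio

async def record_device(device_id: str) -> str:
    await asyncio.sleep(0.1)          # simulate the latency of an async gRPC call
    return 'recorded ' + device_id

async def commit(device_ids):
    tasks = [record_device(device_id) for device_id in device_ids]   # queue all calls first
    if tasks:
        results = await asyncio.gather(*tasks)                       # then run them concurrently
        for result in results:
            print(result)

asyncio.run(commit(['dev-1', 'dev-2', 'dev-3']))
```

Compared with the previous synchronous loop, the records are submitted concurrently, so one slow RPC no longer blocks the remaining ones.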
+ +import logging, threading, asyncio, time +from typing import Dict, Optional +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME +from common.proto.context_pb2 import ( + ContextEvent, ContextId, DeviceEvent, DeviceId, LinkId, LinkEvent, TopologyId, TopologyEvent +) +from common.tools.context_queries.Context import create_context +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from .DltRecordSender import DltRecordSender +from .Types import EventTypes + +LOGGER = logging.getLogger(__name__) + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) +INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id=ADMIN_CONTEXT_ID)) + + +class DLTRecorder(threading.Thread): + def __init__(self) -> None: + super().__init__(daemon=True) + self.terminate = threading.Event() + self.context_client = ContextClient() + self.context_event_collector = EventsCollector(self.context_client) + self.topology_cache: Dict[str, TopologyId] = {} + + # Queues for each event type + self.create_event_queue = asyncio.Queue() + self.update_event_queue = asyncio.Queue() + self.remove_event_queue = asyncio.Queue() + + def stop(self): + self.terminate.set() + + def run(self) -> None: + asyncio.run(self._run()) + + async def _run(self) -> None: + self.context_client.connect() + create_context(self.context_client, DEFAULT_CONTEXT_NAME) + #self.create_topologies() + self.context_event_collector.start() + + batch_timeout = 1 # Time in seconds to wait before processing whatever tasks are available + last_task_time = time.time() + + while not self.terminate.is_set(): + event = self.context_event_collector.get_event(timeout=0.1) + if event: + LOGGER.info('Received Event({:s})...'.format(grpc_message_to_json_string(event))) + + # Prioritize the event based on its type + if event.event.event_type == 1: # CREATE + await self.create_event_queue.put(event) + elif event.event.event_type == 2: # UPDATE + await self.update_event_queue.put(event) + elif event.event.event_type == 3: # REMOVE + await self.remove_event_queue.put(event) + + # Check if it's time to process the tasks or if we have enough tasks + current_time = time.time() + if current_time - last_task_time >= batch_timeout: + await self.process_events() + last_task_time = current_time # Reset the timer after processing + + self.context_event_collector.stop() + self.context_client.close() + + async def process_events(self): + # Process CREATE events first + await self.process_queue(self.create_event_queue) + # Then process UPDATE events + await self.process_queue(self.update_event_queue) + # Finally, process REMOVE events + await self.process_queue(self.remove_event_queue) + + async def process_queue(self, queue : asyncio.Queue): + tasks = [] + while not queue.empty(): + event = await queue.get() + LOGGER.info('Processing Event({:s}) from queue...'.format(grpc_message_to_json_string(event))) + task = asyncio.create_task(self.update_record(event)) + tasks.append(task) + + # Execute tasks concurrently + if tasks: + try: + await asyncio.gather(*tasks) + except Exception as e: + LOGGER.error(f"Error while processing tasks: {e}") + + async def update_record(self, event : EventTypes) -> None: + dlt_record_sender = 
DltRecordSender(self.context_client) + await dlt_record_sender.initialize() # Ensure DltRecordSender is initialized asynchronously + LOGGER.debug('STARTING processing event: {:s}'.format(grpc_message_to_json_string(event))) + + if isinstance(event, ContextEvent): + LOGGER.debug('Processing ContextEvent({:s})'.format(grpc_message_to_json_string(event))) + LOGGER.warning('Ignoring ContextEvent({:s})'.format(grpc_message_to_json_string(event))) + + elif isinstance(event, TopologyEvent): + LOGGER.debug('Processing TopologyEvent({:s})'.format(grpc_message_to_json_string(event))) + self.process_topology_event(event, dlt_record_sender) + + elif isinstance(event, DeviceEvent): + LOGGER.debug('Processing DeviceEvent ASYNC({:s})'.format(grpc_message_to_json_string(event))) + self.process_device_event(event, dlt_record_sender) + + elif isinstance(event, LinkEvent): + LOGGER.debug('Processing LinkEvent({:s})'.format(grpc_message_to_json_string(event))) + self.process_link_event(event, dlt_record_sender) + + else: + LOGGER.warning('Unsupported Event({:s})'.format(grpc_message_to_json_string(event))) + + await dlt_record_sender.commit() + #await asyncio.sleep(2) # Simulates processing time + LOGGER.debug('Finished processing event: {:s}'.format(grpc_message_to_json_string(event))) + + + def process_topology_event(self, event : TopologyEvent, dlt_record_sender : DltRecordSender) -> None: + topology_id = event.topology_id + topology_uuid = topology_id.topology_uuid.uuid + context_id = topology_id.context_id + context_uuid = context_id.context_uuid.uuid + topology_uuids = {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME} + + context = self.context_client.GetContext(context_id) + context_name = context.name + + topology_details = self.context_client.GetTopologyDetails(topology_id) + topology_name = topology_details.name + + self.topology_cache[topology_uuid] = topology_id + + LOGGER.debug('TOPOLOGY Details({:s})'.format(grpc_message_to_json_string(topology_details))) + + if ((context_uuid == DEFAULT_CONTEXT_NAME) or (context_name == DEFAULT_CONTEXT_NAME)) and \ + (topology_uuid not in topology_uuids) and (topology_name not in topology_uuids): + LOGGER.debug('DEVICES({:s})'.format(grpc_message_to_json_string(topology_details.devices))) + for device in topology_details.devices: + LOGGER.debug('DEVICE_INFO_TOPO({:s})'.format(grpc_message_to_json_string(device))) + dlt_record_sender.add_device(topology_id, device) + + for link in topology_details.links: + dlt_record_sender.add_link(topology_id, link) + + else: + MSG = 'Ignoring ({:s}/{:s})({:s}/{:s}) TopologyEvent({:s})' + args = context_uuid, context_name, topology_uuid, topology_name, grpc_message_to_json_string(event) + LOGGER.warning(MSG.format(*args)) + + def find_topology_for_device(self, device_id : DeviceId) -> Optional[TopologyId]: + for topology_uuid, topology_id in self.topology_cache.items(): + details = self.context_client.GetTopologyDetails(topology_id) + for device in details.devices: + if device.device_id == device_id: + return topology_id + return None + + def find_topology_for_link(self, link_id : LinkId) -> Optional[TopologyId]: + for topology_uuid, topology_id in self.topology_cache.items(): + details = self.context_client.GetTopologyDetails(topology_id) + for link in details.links: + if link.link_id == link_id: + return topology_id + return None + + def process_device_event(self, event : DeviceEvent, dlt_record_sender : DltRecordSender) -> None: + device_id = event.device_id + device = self.context_client.GetDevice(device_id) + topology_id 
= self.find_topology_for_device(device_id)
+        if topology_id:
+            LOGGER.debug('DEVICE_INFO({:s}), DEVICE_ID ({:s})'.format(
+                str(device.device_id.device_uuid.uuid),
+                grpc_message_to_json_string(device_id)
+            ))
+            dlt_record_sender.add_device(topology_id, device)
+        else:
+            LOGGER.warning("Topology not found for device {:s}".format(str(device_id.device_uuid.uuid)))
+
+    def process_link_event(self, event: LinkEvent, dlt_record_sender: DltRecordSender) -> None:
+        link_id = event.link_id
+        link = self.context_client.GetLink(link_id)
+        topology_id = self.find_topology_for_link(link_id)
+        if topology_id:
+            dlt_record_sender.add_link(topology_id, link)
+        else:
+            LOGGER.warning("Topology not found for link {:s}".format(str(link_id.link_uuid.uuid)))
diff --git a/src/kpi_manager/README.md b/src/kpi_manager/README.md
index c1feadcc4843db26a219d1e3b37833ddd80b18dc..6e9b56d9349aa6acd5c41004e32c933619a37f65 100644
--- a/src/kpi_manager/README.md
+++ b/src/kpi_manager/README.md
@@ -1,29 +1,24 @@
 # How to locally run and test KPI manager micro-service
-## --- File links need to be updated. ---
 ### Pre-requisets
-The following requirements should be fulfilled before the execuation of KPI management service.
+Ensure the following requirements are met before executing the KPI management service:
-1. Verify that [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and grpcs file are generated sucessfully.
-2. Virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) are installed sucessfully.
-3. Verify the creation of required database and table.
-[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file enlist the functions to create tables and database and
-[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment.
+1. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/requirements.in) successfully installed.
+2. Verify the creation of the required database and table. The
+[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_db.py) python file lists the functions to create tables and the database. The
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/database/KpiEngine.py) file contains the DB string.
 ### Messages format templates
-["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file enlist the basic gRPC messages format used during the testing.
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_messages.py) python file contains templates for creating gRPC messages.
-### Test file
-["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file enlist different tests conducted during the experiment.
+### Unit test file
+The ["KPI manager test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_manager.py) python file lists various tests conducted to validate functionality.
 ### Flow of execution (Kpi Maanager Service functions)
 1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table.
-2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
+2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor to the `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
-3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB.
+3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from the DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from the DB.
-4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
-
-## For KPI composer and KPI writer
-The functionalities of KPI composer and writer is heavily dependent upon Telemetery service. Therfore, these services has other pre-requsites that are mention [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
+4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that match the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
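The flow above corresponds directly to the client API exercised by the unit tests. The sketch below is illustrative only: it reuses `KpiManagerClient` and the request helpers from the test files linked in this README, and assumes the KPI Manager host/port environment variables are already set, as done in `test_kpi_manager.py`.

```python
# Illustrative walk-through of steps 2-4 above using the KPI Manager gRPC client.
from kpi_manager.client.KpiManagerClient import KpiManagerClient
from kpi_manager.tests.test_messages import create_kpi_descriptor_request, create_kpi_filter_request

client = KpiManagerClient()                                             # host/port taken from env vars

kpi_id     = client.SetKpiDescriptor(create_kpi_descriptor_request())   # step 2: returns a KpiId
descriptor = client.GetKpiDescriptor(kpi_id)                            # step 3: read the descriptor back
matches    = client.SelectKpiDescriptor(create_kpi_filter_request())    # step 4: KpiDescriptorList matching the filter
client.DeleteKpiDescriptor(kpi_id)                                      # step 3: delete it again
```

The same sequence, with assertions on the returned message types, appears in the `test_kpi_manager.py` changes later in this patch.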
diff --git a/src/kpi_manager/database/KpiEngine.py b/src/kpi_manager/database/KpiEngine.py index dff406de666b5f68539b8897fa26e0b3ad51286b..0fce7e3d36cf2f03a18f311c815719a4f17b2869 100644 --- a/src/kpi_manager/database/KpiEngine.py +++ b/src/kpi_manager/database/KpiEngine.py @@ -16,8 +16,6 @@ import logging, sqlalchemy from common.Settings import get_setting LOGGER = logging.getLogger(__name__) - -# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}' CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' class KpiEngine: @@ -33,12 +31,10 @@ class KpiEngine: CRDB_SSLMODE = get_setting('CRDB_SSLMODE') crdb_uri = CRDB_URI_TEMPLATE.format( CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) - # crdb_uri = CRDB_URI_TEMPLATE.format( - # CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) try: engine = sqlalchemy.create_engine(crdb_uri, echo=False) LOGGER.info(' KpiDBmanager initalized with DB URL: {:}'.format(crdb_uri)) except: # pylint: disable=bare-except # pragma: no cover LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) return None # type: ignore - return engine + return engine diff --git a/src/kpi_manager/database/Kpi_DB.py b/src/kpi_manager/database/Kpi_DB.py index 4b60640707c8d0c2ce90e5ab135ddf6fd4c91f63..49ad9c9b579daa918818366a1d9505089968edc2 100644 --- a/src/kpi_manager/database/Kpi_DB.py +++ b/src/kpi_manager/database/Kpi_DB.py @@ -34,14 +34,15 @@ class KpiDB: def create_database(self) -> None: if not sqlalchemy_utils.database_exists(self.db_engine.url): - LOGGER.debug("Database created. {:}".format(self.db_engine.url)) sqlalchemy_utils.create_database(self.db_engine.url) + LOGGER.debug("Database created. {:}".format(self.db_engine.url)) def drop_database(self) -> None: if sqlalchemy_utils.database_exists(self.db_engine.url): sqlalchemy_utils.drop_database(self.db_engine.url) def create_tables(self): + # TODO: use "get_tables(declatrative class obj)" method of "sqlalchemy_utils" to verify tables. try: KpiModel.metadata.create_all(self.db_engine) # type: ignore LOGGER.debug("Tables created in the DB Name: {:}".format(self.db_name)) @@ -69,8 +70,7 @@ class KpiDB: session.rollback() if "psycopg2.errors.UniqueViolation" in str(e): LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}") - raise AlreadyExistsException(row.__class__.__name__, row, - extra_details=["Unique key voilation: {:}".format(e)] ) + raise AlreadyExistsException(row.__class__.__name__, row, extra_details=["Unique key voilation: {:}".format(e)] ) else: LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) @@ -89,7 +89,6 @@ class KpiDB: print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search)) return None except Exception as e: - session.rollback() LOGGER.debug(f"Failed to retrieve {model.__name__} ID. 
{str(e)}") raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)]) finally: diff --git a/src/kpi_manager/tests/test_kpi_db.py b/src/kpi_manager/tests/test_kpi_db.py index e961c12bacdbac07f111b229435ed3d89d62581f..d4a57f83664f851504389b3bbe99d5c2a92542d9 100644 --- a/src/kpi_manager/tests/test_kpi_db.py +++ b/src/kpi_manager/tests/test_kpi_db.py @@ -21,8 +21,8 @@ LOGGER = logging.getLogger(__name__) def test_verify_databases_and_Tables(): LOGGER.info('>>> test_verify_Tables : START <<< ') kpiDBobj = KpiDB() - kpiDBobj.drop_database() - kpiDBobj.verify_tables() + # kpiDBobj.drop_database() + # kpiDBobj.verify_tables() kpiDBobj.create_database() kpiDBobj.create_tables() kpiDBobj.verify_tables() diff --git a/src/kpi_manager/tests/test_kpi_manager.py b/src/kpi_manager/tests/test_kpi_manager.py index f0d9526d33694a683b70180eb3bc6de833bf1cfa..219fdadee9e2f4ca9ea9ac0be040043d4edfbdbe 100755 --- a/src/kpi_manager/tests/test_kpi_manager.py +++ b/src/kpi_manager/tests/test_kpi_manager.py @@ -17,7 +17,7 @@ import os, pytest import logging from typing import Union -#from common.proto.context_pb2 import Empty +from common.proto.context_pb2 import Empty from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc) @@ -26,12 +26,6 @@ from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList from common.tools.service.GenericGrpcService import GenericGrpcService -#from context.client.ContextClient import ContextClient - -# from device.service.driver_api.DriverFactory import DriverFactory -# from device.service.driver_api.DriverInstanceCache import DriverInstanceCache -# from device.service.DeviceService import DeviceService -# from device.client.DeviceClient import DeviceClient from kpi_manager.tests.test_messages import create_kpi_descriptor_request, create_kpi_filter_request, create_kpi_descriptor_request_a from kpi_manager.service.KpiManagerService import KpiManagerService @@ -39,12 +33,6 @@ from kpi_manager.client.KpiManagerClient import KpiManagerClient from kpi_manager.tests.test_messages import create_kpi_descriptor_request from kpi_manager.tests.test_messages import create_kpi_id_request - -#from monitoring.service.NameMapping import NameMapping - -#os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' -#from device.service.drivers import DRIVERS - ########################### # Tests Setup ########################### @@ -55,8 +43,6 @@ KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) # t os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT) -# METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME'){} - LOGGER = logging.getLogger(__name__) class MockContextService(GenericGrpcService): @@ -70,84 +56,10 @@ class MockContextService(GenericGrpcService): self.context_servicer = MockServicerImpl_Context() add_ContextServiceServicer_to_server(self.context_servicer, self.server) -# @pytest.fixture(scope='session') -# def context_service(): -# LOGGER.info('Initializing MockContextService...') -# _service = MockContextService(MOCKSERVICE_PORT) -# _service.start() - -# LOGGER.info('Yielding MockContextService...') -# yield _service - -# 
LOGGER.info('Terminating MockContextService...') -# _service.context_servicer.msg_broker.terminate() -# _service.stop() - -# LOGGER.info('Terminated MockContextService...') - -# @pytest.fixture(scope='session') -# def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing ContextClient...') -# _client = ContextClient() - -# LOGGER.info('Yielding ContextClient...') -# yield _client - -# LOGGER.info('Closing ContextClient...') -# _client.close() - -# LOGGER.info('Closed ContextClient...') - -# @pytest.fixture(scope='session') -# def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing DeviceService...') -# driver_factory = DriverFactory(DRIVERS) -# driver_instance_cache = DriverInstanceCache(driver_factory) -# _service = DeviceService(driver_instance_cache) -# _service.start() - -# # yield the server, when test finishes, execution will resume to stop it -# LOGGER.info('Yielding DeviceService...') -# yield _service - -# LOGGER.info('Terminating DeviceService...') -# _service.stop() - -# LOGGER.info('Terminated DeviceService...') - -# @pytest.fixture(scope='session') -# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing DeviceClient...') -# _client = DeviceClient() - -# LOGGER.info('Yielding DeviceClient...') -# yield _client - -# LOGGER.info('Closing DeviceClient...') -# _client.close() - -# LOGGER.info('Closed DeviceClient...') - -# @pytest.fixture(scope='session') -# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing DeviceClient...') -# _client = DeviceClient() - -# LOGGER.info('Yielding DeviceClient...') -# yield _client - -# LOGGER.info('Closing DeviceClient...') -# _client.close() - -# LOGGER.info('Closed DeviceClient...') - # This fixture will be requested by test cases and last during testing session @pytest.fixture(scope='session') def kpi_manager_service(): LOGGER.info('Initializing KpiManagerService...') - #name_mapping = NameMapping() - # _service = MonitoringService(name_mapping) - # _service = KpiManagerService(name_mapping) _service = KpiManagerService() _service.start() @@ -181,35 +93,28 @@ def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disab # Prepare Environment, should be the first test ################################################## -# # ERROR on this test --- -# def test_prepare_environment( -# context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument -# ): -# context_id = json_context_id(DEFAULT_CONTEXT_NAME) -# context_client.SetContext(Context(**json_context(DEFAULT_CONTEXT_NAME))) -# context_client.SetTopology(Topology(**json_topology(DEFAULT_TOPOLOGY_NAME, context_id=context_id))) ########################### # Tests Implementation of Kpi Manager ########################### # ---------- 3rd Iteration Tests ---------------- -# def test_SetKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiId) +def test_SetKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") + response = 
kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) -# def test_DeleteKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ") -# # adding KPI -# response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# # deleting KPI -# del_response = kpi_manager_client.DeleteKpiDescriptor(response_id) -# # select KPI -# kpi_manager_client.GetKpiDescriptor(response_id) -# LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response)) -# assert isinstance(del_response, Empty) +def test_DeleteKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ") + # adding KPI + response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # deleting KPI + del_response = kpi_manager_client.DeleteKpiDescriptor(response_id) + # select KPI + kpi_manager_client.GetKpiDescriptor(response_id) + LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response)) + assert isinstance(del_response, Empty) def test_GetKpiDescriptor(kpi_manager_client): LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") @@ -225,77 +130,18 @@ def test_GetKpiDescriptor(kpi_manager_client): assert isinstance(response, KpiDescriptor) -# def test_SelectKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ") -# # adding KPI -# kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# # select KPI(s) -# response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiDescriptorList) - -# def test_set_list_of_KPIs(kpi_manager_client): -# LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ") -# KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"] -# # adding KPI -# for kpi in KPIs_TO_SEARCH: -# kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi)) - - -# ---------- 2nd Iteration Tests ----------------- -# def test_SetKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") -# with open("kpi_manager/tests/KPI_configs.json", 'r') as file: -# data = json.load(file) -# _descriptors = data.get('KPIs', []) -# for _descritor_name in _descriptors: -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(_descritor_name)) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiId) - -# def test_GetKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") -# response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiDescriptor) - -# def test_DeleteKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ") -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# del_response = kpi_manager_client.DeleteKpiDescriptor(response) -# kpi_manager_client.GetKpiDescriptor(response) -# LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response)) -# assert isinstance(del_response, Empty) - -# def test_SelectKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ") -# 
kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a()) -# response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request_a()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiDescriptorList) - -# ------------- INITIAL TESTs ---------------- -# Test case that makes use of client fixture to test server's CreateKpi method -# def test_set_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name -# # make call to server -# LOGGER.warning('test_create_kpi requesting') -# for i in range(3): -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_request(str(i+1))) -# LOGGER.debug(str(response)) -# assert isinstance(response, KpiId) - -# # Test case that makes use of client fixture to test server's DeleteKpi method -# def test_delete_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name -# # make call to server -# LOGGER.warning('delete_kpi requesting') -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_request('4')) -# response = kpi_manager_client.DeleteKpiDescriptor(response) -# LOGGER.debug(str(response)) -# assert isinstance(response, Empty) +def test_SelectKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ") + # adding KPI + kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # select KPI(s) + response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request()) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiDescriptorList) -# # Test case that makes use of client fixture to test server's GetKpiDescriptor method -# def test_select_kpi_descriptor(kpi_manager_client): # pylint: disable=redefined-outer-name -# LOGGER.warning('test_selectkpidescritor begin') -# response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request()) -# LOGGER.debug(str(response)) -# assert isinstance(response, KpiDescriptorList) +def test_set_list_of_KPIs(kpi_manager_client): + LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ") + KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"] + # adding KPI + for kpi in KPIs_TO_SEARCH: + kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi)) diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml index 166e9d3cbcf3eb09c914384a9906853dddd7bfb5..1a6f821ba9e798bb4220d914109ab3a65f0f1792 100644 --- a/src/kpi_value_api/.gitlab-ci.yml +++ b/src/kpi_value_api/.gitlab-ci.yml @@ -50,10 +50,30 @@ unit_test kpi-value-api: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi - docker container prune -f script: - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - - docker run --name $IMAGE_NAME -d -p 30020:30020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - 
> + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME -d -p 30020:30020 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - sleep 5 - docker ps -a - docker logs $IMAGE_NAME @@ -74,7 +94,7 @@ unit_test kpi-value-api: - src/$IMAGE_NAME/**/*.{py,in,yml} - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - - src/$IMAGE_NAME/tests/Dockerfile + # - src/$IMAGE_NAME/tests/Dockerfile # mayne not needed - manifests/${IMAGE_NAME}service.yaml - .gitlab-ci.yml artifacts: diff --git a/src/kpi_value_api/Dockerfile b/src/kpi_value_api/Dockerfile index 7dd8d307b8338c4a29e97c742ca12a49c4611e0a..25b8da931f88000dd229c536456a3eb1fa7f56db 100644 --- a/src/kpi_value_api/Dockerfile +++ b/src/kpi_value_api/Dockerfile @@ -63,6 +63,8 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory WORKDIR /var/teraflow COPY src/kpi_value_api/. kpi_value_api/ +COPY src/kpi_manager/__init__.py kpi_manager/__init__.py +COPY src/kpi_manager/client/. kpi_manager/client/ # Start the service ENTRYPOINT ["python", "-m", "kpi_value_api.service"] diff --git a/src/kpi_value_api/README.md b/src/kpi_value_api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..70ba2c5e79c79147e336307ecc6d5ddfc263df90 --- /dev/null +++ b/src/kpi_value_api/README.md @@ -0,0 +1,23 @@ +# How to locally run and test KPI Value API micro-service + +### Pre-requisets +Ensure the following requirements are met before executing the KPI Value API service. + +1. The KPI Manger service is running and Apache Kafka is running. + +2. A virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/requirements.in) file sucessfully installed. + +3. Call the ["create_all_topics()"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/common/tools/kafka/Variables.py) function to verify the existence of all required topics on kafka. + +### Messages format templates +The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/messages.py) python file contains templates for creating gRPC messages. + +### Unit test file +The ["KPI Value API test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/test_kpi_value_api.py) python file enlist various tests conducted to validate functionality. + +### Flow of execution (Kpi Maanager Service functions) +1. Call the `create_new_topic_if_not_exists(<list of string>)` method to create any new topics if needed. + +2. Call `StoreKpiValues(KpiValueList)` to produce `Kpi Value` on a Kafka Topic. (The `KpiValueWriter` microservice will consume and process the `Kpi Value`) + +3. Call `SelectKpiValues(KpiValueFilter) -> KpiValueList` to read metric from the Prometheus DB. 
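As an illustration of step 2, a minimal sketch of how a caller could build the `KpiValueList` consumed by `StoreKpiValues`. The field paths follow the servicer implementation further down in this patch; the client wrapper in the usage note is a hypothetical placeholder, not part of the code base, and the example UUID is the one used in `tests/messages.py`.

```python
# Minimal sketch (not the authoritative client): builds a KpiValueList for StoreKpiValues.
# Assumptions: a KpiDescriptor with the given UUID already exists in the KPI DB, and some
# gRPC client/stub for the KPI Value API is available (the wrapper name below is hypothetical).
import time
from common.proto.kpi_value_api_pb2 import KpiValue, KpiValueList

def build_kpi_value_list(kpi_uuid: str, value: float) -> KpiValueList:
    kpi_value = KpiValue()
    kpi_value.kpi_id.kpi_id.uuid      = kpi_uuid     # must reference an existing KpiDescriptor
    kpi_value.timestamp.timestamp     = time.time()  # epoch seconds
    kpi_value.kpi_value_type.floatVal = value        # or int32Val/uint32Val/int64Val/uint64Val/stringVal/boolVal
    kpi_value_list = KpiValueList()
    kpi_value_list.kpi_value_list.append(kpi_value)
    return kpi_value_list

# Hypothetical usage:
# client = KpiValueApiClient()   # assumed wrapper around the KpiValueAPIService gRPC stub
# client.StoreKpiValues(build_kpi_value_list("725ce3ad-ac67-4373-bd35-8cd9d6a86e09", 12.3))
```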
diff --git a/src/kpi_value_api/requirements.in b/src/kpi_value_api/requirements.in index 7e4694109dc4e1d31b86abfc03162494faafcdaf..f5695906a8d02d55e15960a76986b8d03f02dba1 100644 --- a/src/kpi_value_api/requirements.in +++ b/src/kpi_value_api/requirements.in @@ -14,3 +14,4 @@ confluent-kafka==2.3.* requests==2.27.* +prometheus-api-client==0.5.3 \ No newline at end of file diff --git a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py index d27de54f3cddfd0d70d656a89c45adc50e518289..4ea978fafc8d7454d41f64182d553d030215113a 100644 --- a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py +++ b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py @@ -12,98 +12,141 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, grpc, requests -from typing import Tuple, Any -from datetime import datetime +import logging, grpc, json +from typing import Dict from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.tools.kafka.Variables import KafkaConfig, KafkaTopic from common.proto.context_pb2 import Empty +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceServicer from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter, KpiValue, KpiValueType from confluent_kafka import Producer as KafkaProducer +from prometheus_api_client import PrometheusConnect +from prometheus_api_client.utils import parse_datetime + +from kpi_manager.client.KpiManagerClient import KpiManagerClient LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('KpiValueAPI', 'NBIgRPC') -PROM_URL = "http://localhost:9090" +PROM_URL = "http://prometheus-k8s.monitoring.svc.cluster.local:9090" # TODO: updated with the env variables class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer): def __init__(self): LOGGER.debug('Init KpiValueApiService') - + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def StoreKpiValues(self, request: KpiValueList, grpc_context: grpc.ServicerContext ) -> Empty: LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request)) - producer_obj = KafkaProducer({ - 'bootstrap.servers' : KafkaConfig.SERVER_IP.value - }) + + producer = self.kafka_producer for kpi_value in request.kpi_value_list: - kpi_value_to_produce : Tuple [str, Any, Any] = ( - kpi_value.kpi_id.kpi_id, - kpi_value.timestamp, - kpi_value.kpi_value_type # kpi_value.kpi_value_type.(many options) how? 
- ) + kpi_value_to_produce : Dict = { + "kpi_uuid" : kpi_value.kpi_id.kpi_id.uuid, + "timestamp" : kpi_value.timestamp.timestamp, + "kpi_value_type" : self.ExtractKpiValueByType(kpi_value.kpi_value_type) + } LOGGER.debug('KPI to produce is {:}'.format(kpi_value_to_produce)) msg_key = "gRPC-kpivalueapi" # str(__class__.__name__) can be used - producer_obj.produce( + producer.produce( KafkaTopic.VALUE.value, key = msg_key, - value = kpi_value.SerializeToString(), # value = json.dumps(kpi_value_to_produce), + value = json.dumps(kpi_value_to_produce), callback = self.delivery_callback ) - producer_obj.flush() + producer.flush() return Empty() + def ExtractKpiValueByType(self, value): + attributes = [ 'floatVal' , 'int32Val' , 'uint32Val','int64Val', + 'uint64Val', 'stringVal', 'boolVal'] + for attr in attributes: + try: + return getattr(value, attr) + except (ValueError, TypeError, AttributeError): + continue + return None + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SelectKpiValues(self, request: KpiValueFilter, grpc_context: grpc.ServicerContext ) -> KpiValueList: LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request)) response = KpiValueList() - metrics = [kpi.kpi_id for kpi in request.kpi_id] - start_timestamps = [timestamp for timestamp in request.start_timestamp] - end_timestamps = [timestamp for timestamp in request.end_timestamp] - results = [] + + kpi_manager_client = KpiManagerClient() + prom_connect = PrometheusConnect(url=PROM_URL) - for start, end in zip(start_timestamps, end_timestamps): - start_str = datetime.fromtimestamp(start.seconds).isoformat() + "Z" - end_str = datetime.fromtimestamp(end.seconds).isoformat() + "Z" + metrics = [self.GetKpiSampleType(kpi, kpi_manager_client) for kpi in request.kpi_id] + start_timestamps = [parse_datetime(timestamp) for timestamp in request.start_timestamp] + end_timestamps = [parse_datetime(timestamp) for timestamp in request.end_timestamp] + prom_response = [] + for start_time, end_time in zip(start_timestamps, end_timestamps): for metric in metrics: - url = f'{PROM_URL}/api/v1/query_range' - params = { - 'query': metric, - 'start': start_str, - 'end' : end_str, - 'step' : '30s' # or any other step you need - } - response = requests.get(url, params=params) - if response.status_code == 200: - data = response.json() - for result in data['data']['result']: - for value in result['values']: - kpi_value = KpiValue( - kpi_id=metric, - timestamp=str(seconds=value[0]), - kpi_value_type=self._convert_value_to_kpi_value_type(value[1]) - ) - results.append(kpi_value) - - def _convert_value_to_kpi_value_type(self, value): + print(start_time, end_time, metric) + LOGGER.debug(">>> Query: {:}".format(metric)) + prom_response.append( + prom_connect.custom_query_range( + query = metric, # this is the metric name and label config + start_time = start_time, + end_time = end_time, + step = 30, # or any other step value (missing in gRPC Filter request) + ) + ) + + for single_resposne in prom_response: + # print ("{:}".format(single_resposne)) + for record in single_resposne: + # print("Record >>> kpi: {:} >>> time & values set: {:}".format(record['metric']['__name__'], record['values'])) + for value in record['values']: + # print("{:} - {:}".format(record['metric']['__name__'], value)) + kpi_value = KpiValue() + kpi_value.kpi_id.kpi_id = record['metric']['__name__'], + kpi_value.timestamp = value[0], + kpi_value.kpi_value_type = self.ConverValueToKpiValueType(value[1]) + response.kpi_value_list.append(kpi_value) + return 
response + + def GetKpiSampleType(self, kpi_value: str, kpi_manager_client): + print("--- START -----") + + kpi_id = KpiId() + kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid + # print("KpiId generated: {:}".format(kpi_id)) + + try: + kpi_descriptor_object = KpiDescriptor() + kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id) + # TODO: why kpi_descriptor_object recevies a KpiDescriptor type object not Empty type object??? + if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid: + LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) + print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) + return KpiSampleType.Name(kpi_descriptor_object.kpi_sample_type) # extract and return the name of KpiSampleType + else: + LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) + print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) + except Exception as e: + LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e)) + print ("Unable to get KpiDescriptor. Error: {:}".format(e)) + + def ConverValueToKpiValueType(self, value): # Check if the value is an integer (int64) try: - int64_value = int(value) - return KpiValueType(int64Val=int64_value) - except ValueError: + int_value = int(value) + return KpiValueType(int64Val=int_value) + except (ValueError, TypeError): pass # Check if the value is a float try: float_value = float(value) return KpiValueType(floatVal=float_value) - except ValueError: + except (ValueError, TypeError): pass # Check if the value is a boolean if value.lower() in ['true', 'false']: @@ -112,7 +155,6 @@ class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer): # If none of the above, treat it as a string return KpiValueType(stringVal=value) - def delivery_callback(self, err, msg): if err: LOGGER.debug('Message delivery failed: {:}'.format(err)) else: LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) diff --git a/src/kpi_value_api/tests/messages.py b/src/kpi_value_api/tests/messages.py index c2a1cbb0b275fb26d6498e4470f3869a105a8d36..d8ad14bd44eebc1e9412cfd5ff2973e6018c95e9 100644 --- a/src/kpi_value_api/tests/messages.py +++ b/src/kpi_value_api/tests/messages.py @@ -18,8 +18,9 @@ from common.proto.kpi_value_api_pb2 import KpiValue, KpiValueList def create_kpi_value_list(): _create_kpi_value_list = KpiValueList() - # To run this experiment sucessfully, already existing UUID in KPI DB in necessary. - # because the UUID is used to get the descriptor form KPI DB. + # To run this experiment sucessfully, add an existing UUID of a KPI Descriptor from the KPI DB. + # This UUID is used to get the descriptor form the KPI DB. If the Kpi ID does not exists, + # some part of the code won't execute. 
EXISTING_KPI_IDs = ["725ce3ad-ac67-4373-bd35-8cd9d6a86e09", str(uuid.uuid4()), str(uuid.uuid4())] diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml index 25619ce7f8b4346172587dbf2e804896aff20e4d..9a2f9fd47e435b26e2e3a335bd9b95da58a0517f 100644 --- a/src/kpi_value_writer/.gitlab-ci.yml +++ b/src/kpi_value_writer/.gitlab-ci.yml @@ -50,10 +50,30 @@ unit_test kpi-value-writer: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi - docker container prune -f script: - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - - docker run --name $IMAGE_NAME -d -p 30030:30030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME -d -p 30030:30030 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - sleep 5 - docker ps -a - docker logs $IMAGE_NAME @@ -64,6 +84,8 @@ unit_test kpi-value-writer: coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: - docker rm -f $IMAGE_NAME + - docker rm -f zookeeper + - docker rm -f kafka - docker network rm teraflowbridge rules: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' diff --git a/src/kpi_value_writer/README.md b/src/kpi_value_writer/README.md index 72ba6e5594adeef4a29d650615716c26273ed115..c45a0e39534fae9efef4174d5ca5be7047845c48 100644 --- a/src/kpi_value_writer/README.md +++ b/src/kpi_value_writer/README.md @@ -1,29 +1,17 @@ -# How to locally run and test KPI manager micro-service +# How to locally run and test the KPI Value Writer micro-service -## --- File links need to be updated. --- ### Pre-requisets -The following requirements should be fulfilled before the execuation of KPI management service. +Ensure the following requirements are meet before executing the KPI Value Writer service> -1. Verify that [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and grpcs file are generated sucessfully. -2. 
Virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) are installed sucessfully.
-3. Verify the creation of required database and table.
-[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file enlist the functions to create tables and database and
-[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment.
+1. The KPI Manager and KPI Value API services are running and Apache Kafka is running.
 
-### Messages format templates
-["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file enlist the basic gRPC messages format used during the testing.
-
-### Test file
-["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file enlist different tests conducted during the experiment.
-
-### Flow of execution (Kpi Maanager Service functions)
-1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table.
+2. A virtual environment exists with all the required packages listed in the ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/requirements.in) file successfully installed.
 
-2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
-
-3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB.
+### Messages format templates
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_messages.py) Python file contains the templates used to create gRPC messages.
 
-4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
+### Unit test file
+The ["KPI Value Writer test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_kpi_value_writer.py) Python file lists the various tests conducted to validate functionality.
 
-## For KPI composer and KPI writer
-The functionalities of KPI composer and writer is heavily dependent upon Telemetery service. Therfore, these services has other pre-requsites that are mention [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
\ No newline at end of file
+### Flow of execution
+1. Call the `RunKafkaConsumer` method from the `KpiValueWriter` class to start consuming the `KPI Value` messages generated by the `KPI Value API` or `Telemetry`. For every valid `KPI Value` consumed from Kafka, it invokes the `PrometheusWriter` class to prepare and push the metric to the Prometheus DB.
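To illustrate the flow of execution above, a minimal sketch of the consumption loop is shown below. It mirrors the `KafkaKpiConsumer` method in the diff that follows; the broker address, topic name and JSON payload shape are taken from that code, while the group id, poll timeout and logging are illustrative assumptions. It also assumes the TFS `src` tree is on `PYTHONPATH` and a Kafka broker is reachable.

```python
# Minimal consumption-loop sketch mirroring KafkaKpiConsumer (illustrative, not the service itself).
import json, logging
from confluent_kafka import Consumer
from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

LOGGER = logging.getLogger(__name__)

consumer = Consumer({
    'bootstrap.servers' : KafkaConfig.get_kafka_address(),
    'group.id'          : 'KpiValueWriter',
    'auto.offset.reset' : 'latest',
})
consumer.subscribe([KafkaTopic.VALUE.value])

while True:
    raw_kpi = consumer.poll(1.0)
    if raw_kpi is None:
        continue
    if raw_kpi.error():
        LOGGER.error('Consumer error: {:}'.format(raw_kpi.error()))
        continue
    # Messages are JSON-encoded dicts produced by the KPI Value API:
    # {"kpi_uuid": <str>, "timestamp": <float>, "kpi_value_type": <value>}
    kpi_value = json.loads(raw_kpi.value().decode('utf-8'))
    LOGGER.info('Received KPI value for uuid {:}'.format(kpi_value['kpi_uuid']))
    # The real service then resolves the KpiDescriptor via KpiManagerClient and
    # exposes the value as a Prometheus Gauge (see MetricWriterToPrometheus).
```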
diff --git a/src/kpi_value_writer/service/KpiValueWriter.py b/src/kpi_value_writer/service/KpiValueWriter.py index 26bab44657606b1f3edc14659d128c5ccc7a6890..8b258a1424cc44be4dcb9134ee913c707cc44bfa 100644 --- a/src/kpi_value_writer/service/KpiValueWriter.py +++ b/src/kpi_value_writer/service/KpiValueWriter.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import logging import threading from common.tools.kafka.Variables import KafkaConfig, KafkaTopic @@ -33,32 +34,30 @@ from .MetricWriterToPrometheus import MetricWriterToPrometheus LOGGER = logging.getLogger(__name__) ACTIVE_CONSUMERS = [] -METRIC_WRITER = MetricWriterToPrometheus() class KpiValueWriter(GenericGrpcService): def __init__(self, cls_name : str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.KPIVALUEWRITER) super().__init__(port, cls_name=cls_name) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'KpiValueWriter', + 'auto.offset.reset' : 'latest'}) - @staticmethod - def RunKafkaConsumer(): - thread = threading.Thread(target=KpiValueWriter.KafkaConsumer, args=()) + def RunKafkaConsumer(self): + thread = threading.Thread(target=self.KafkaKpiConsumer, args=()) ACTIVE_CONSUMERS.append(thread) thread.start() - @staticmethod - def KafkaConsumer(): - kafka_consumer = KafkaConsumer( - { 'bootstrap.servers' : KafkaConfig.SERVER_IP.value, - 'group.id' : __class__, - 'auto.offset.reset' : 'latest'} - ) + def KafkaKpiConsumer(self): kpi_manager_client = KpiManagerClient() - kafka_consumer.subscribe([KafkaTopic.VALUE.value]) + metric_writer = MetricWriterToPrometheus() + + consumer = self.kafka_consumer + consumer.subscribe([KafkaTopic.VALUE.value]) LOGGER.debug("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value)) print("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value)) while True: - raw_kpi = kafka_consumer.poll(1.0) + raw_kpi = consumer.poll(1.0) if raw_kpi is None: continue elif raw_kpi.error(): @@ -68,33 +67,29 @@ class KpiValueWriter(GenericGrpcService): print("Consumer error: {}".format(raw_kpi.error())) continue try: - kpi_value = KpiValue() - kpi_value.ParseFromString(raw_kpi.value()) + kpi_value = json.loads(raw_kpi.value().decode('utf-8')) LOGGER.info("Received KPI : {:}".format(kpi_value)) print("Received KPI : {:}".format(kpi_value)) - KpiValueWriter.get_kpi_descriptor(kpi_value, kpi_manager_client) + self.get_kpi_descriptor(kpi_value, kpi_manager_client, metric_writer) except Exception as e: print("Error detail: {:}".format(e)) continue - @staticmethod - def get_kpi_descriptor(kpi_value: str, kpi_manager_client ): + def get_kpi_descriptor(self, kpi_value: str, kpi_manager_client, metric_writer): print("--- START -----") kpi_id = KpiId() - kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid + kpi_id.kpi_id.uuid = kpi_value['kpi_uuid'] print("KpiId generated: {:}".format(kpi_id)) # print("Kpi manger client created: {:}".format(kpi_manager_client)) - try: kpi_descriptor_object = KpiDescriptor() kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id) + # TODO: why kpi_descriptor_object recevies a KpiDescriptor type object not Empty type object??? 
if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid: - # print("kpi descriptor received: {:}".format(kpi_descriptor_object)) - # if isinstance (kpi_descriptor_object, KpiDescriptor): LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) - METRIC_WRITER.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value) + metric_writer.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value) else: LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) diff --git a/src/kpi_value_writer/service/MetricWriterToPrometheus.py b/src/kpi_value_writer/service/MetricWriterToPrometheus.py index b681164786bd310d457998bae55b836522888b94..85e618a4b5b330cb83cf255652e7be8dff2dabd3 100644 --- a/src/kpi_value_writer/service/MetricWriterToPrometheus.py +++ b/src/kpi_value_writer/service/MetricWriterToPrometheus.py @@ -14,11 +14,9 @@ # read Kafka stream from Kafka topic -import ast -import time -import threading import logging -from prometheus_client import start_http_server, Gauge, CollectorRegistry +from typing import Dict +from prometheus_client import Gauge from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.proto.kpi_value_api_pb2 import KpiValue @@ -26,7 +24,6 @@ from common.proto.kpi_manager_pb2 import KpiDescriptor LOGGER = logging.getLogger(__name__) PROM_METRICS = {} -PROM_REGISTERY = CollectorRegistry() class MetricWriterToPrometheus: ''' @@ -34,13 +31,7 @@ class MetricWriterToPrometheus: cooked KPI value = KpiDescriptor (gRPC message) + KpiValue (gRPC message) ''' def __init__(self): - # prometheus server address and configs - self.start_prometheus_client() pass - - def start_prometheus_client(self): - start_http_server(10808, registry=PROM_REGISTERY) - LOGGER.debug("Prometheus client is started on port 10808") def merge_kpi_descriptor_and_kpi_value(self, kpi_descriptor, kpi_value): # Creating a dictionary from the kpi_descriptor's attributes @@ -54,25 +45,24 @@ class MetricWriterToPrometheus: 'slice_id' : kpi_descriptor.slice_id.slice_uuid.uuid, 'connection_id' : kpi_descriptor.connection_id.connection_uuid.uuid, 'link_id' : kpi_descriptor.link_id.link_uuid.uuid, - 'time_stamp' : kpi_value.timestamp.timestamp, - 'kpi_value' : kpi_value.kpi_value_type.floatVal + 'time_stamp' : kpi_value['timestamp'], + 'kpi_value' : kpi_value['kpi_value_type'] } # LOGGER.debug("Cooked Kpi: {:}".format(cooked_kpi)) return cooked_kpi - def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: KpiValue): + def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: Dict): # merge both gRPC messages into single varible. 
cooked_kpi = self.merge_kpi_descriptor_and_kpi_value(kpi_descriptor, kpi_value) - tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'} # extracted values will be used as metric tag - metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude] + tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'} + metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude] # These values will be used as metric tags metric_name = cooked_kpi['kpi_sample_type'] try: if metric_name not in PROM_METRICS: # Only register the metric, when it doesn't exists PROM_METRICS[metric_name] = Gauge ( metric_name, cooked_kpi['kpi_description'], - metric_tags, - registry=PROM_REGISTERY + metric_tags ) LOGGER.debug("Metric is created with labels: {:}".format(metric_tags)) PROM_METRICS[metric_name].labels( @@ -84,7 +74,7 @@ class MetricWriterToPrometheus: connection_id = cooked_kpi['connection_id'], link_id = cooked_kpi['link_id'], time_stamp = cooked_kpi['time_stamp'], - ).set(float(cooked_kpi['kpi_value'])) + ).set(cooked_kpi['kpi_value']) LOGGER.debug("Metric pushed to the endpoints: {:}".format(PROM_METRICS[metric_name])) except ValueError as e: @@ -93,4 +83,5 @@ class MetricWriterToPrometheus: print("Metric {:} is already registered. Skipping.".format(metric_name)) else: LOGGER.error("Error while pushing metric: {}".format(e)) - raise \ No newline at end of file + raise + diff --git a/src/kpi_value_writer/service/__main__.py b/src/kpi_value_writer/service/__main__.py index aa67540fb899781297d1235dc2e15bcbb2c38585..be9f8f29bfdb2397eedd0ce2821c5da8f778cfc4 100644 --- a/src/kpi_value_writer/service/__main__.py +++ b/src/kpi_value_writer/service/__main__.py @@ -13,6 +13,7 @@ # limitations under the License. import logging, signal, sys, threading +from prometheus_client import start_http_server from kpi_value_writer.service.KpiValueWriter import KpiValueWriter from common.Settings import get_log_level @@ -38,6 +39,8 @@ def main(): grpc_service = KpiValueWriter() grpc_service.start() + start_http_server(10808) + LOGGER.debug("Prometheus client is started on port 10808") # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=1.0): pass diff --git a/src/kpi_value_writer/tests/test_kpi_value_writer.py b/src/kpi_value_writer/tests/test_kpi_value_writer.py index 572495d48d70cdc40c0ef6bb1efcf877e2a610ee..b784fae5da713f9bd7cd7a1668f48b080f7a84fa 100755 --- a/src/kpi_value_writer/tests/test_kpi_value_writer.py +++ b/src/kpi_value_writer/tests/test_kpi_value_writer.py @@ -14,31 +14,12 @@ import logging from kpi_value_writer.service.KpiValueWriter import KpiValueWriter + from common.tools.kafka.Variables import KafkaTopic -from kpi_manager.client.KpiManagerClient import KpiManagerClient -from kpi_manager.tests.test_messages import create_kpi_descriptor_request -from common.proto.kpi_manager_pb2 import KpiDescriptor -from kpi_value_writer.tests.test_messages import create_kpi_id_request -LOGGER = logging.getLogger(__name__) -# def test_GetKpiDescriptor(): -# LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") -# kpi_manager_client = KpiManagerClient() -# # adding KPI -# LOGGER.info(" --->>> calling SetKpiDescriptor ") -# response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# # get KPI -# LOGGER.info(" --->>> calling GetKpiDescriptor with response ID") -# response = kpi_manager_client.GetKpiDescriptor(response_id) -# LOGGER.info("Response gRPC message object: {:}".format(response)) - -# LOGGER.info(" --->>> calling 
GetKpiDescriptor with random ID") -# rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request()) -# LOGGER.info("Response gRPC message object: {:}".format(rand_response)) -# LOGGER.info("\n------------------ TEST FINISHED ---------------------\n") -# assert isinstance(response, KpiDescriptor) +LOGGER = logging.getLogger(__name__) # -------- Initial Test ---------------- def test_validate_kafka_topics(): @@ -48,5 +29,5 @@ def test_validate_kafka_topics(): def test_KafkaConsumer(): LOGGER.debug(" --->>> test_kafka_consumer: START <<<--- ") - KpiValueWriter.RunKafkaConsumer() - + kpi_value_writer = KpiValueWriter() + kpi_value_writer.RunKafkaConsumer() diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index ca70fa9386e356e7e49397365701013e1d3a1697..494e59e3c6698c6ae052e1f4b35b7a56e8dc3fba 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ b/src/service/service/service_handler_api/FilterFields.py @@ -26,7 +26,8 @@ SERVICE_TYPE_VALUES = { ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, ServiceTypeEnum.SERVICETYPE_TE, ServiceTypeEnum.SERVICETYPE_E2E, - ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, + ServiceTypeEnum.SERVICETYPE_QKD, } DEVICE_DRIVER_VALUES = { @@ -41,7 +42,8 @@ DEVICE_DRIVER_VALUES = { DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS, DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN, - DeviceDriverEnum.DEVICEDRIVER_OC + DeviceDriverEnum.DEVICEDRIVER_OC, + DeviceDriverEnum.DEVICEDRIVER_QKD, } # Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 8b5e2b2834f37dc3d616907e46cf1fa5b2f1274f..3beb7c9ee95c79d7219550a62166c648e061a01d 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -27,6 +27,7 @@ from .tapi_tapi.TapiServiceHandler import TapiServiceHandler from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler from .e2e_orch.E2EOrchestratorServiceHandler import E2EOrchestratorServiceHandler from .oc.OCServiceHandler import OCServiceHandler +from .qkd.qkd_service_handler import QKDServiceHandler SERVICE_HANDLERS = [ (L2NMEmulatedServiceHandler, [ @@ -106,5 +107,11 @@ SERVICE_HANDLERS = [ FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_OC, } + ]), + (QKDServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_QKD, + FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_QKD], + } ]) ] diff --git a/src/service/service/service_handlers/qkd/__init__.py b/src/service/service/service_handlers/qkd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/service/service/service_handlers/qkd/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/service/service/service_handlers/qkd/qkd_service_handler.py b/src/service/service/service_handlers/qkd/qkd_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..76c67867ee2f4bae60b8dd6e187f221f2efc1eb0 --- /dev/null +++ b/src/service/service/service_handlers/qkd/qkd_service_handler.py @@ -0,0 +1,394 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json, logging, uuid +from typing import Any, Dict, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigRule, DeviceId, Service +from common.proto.app_pb2 import App, QKDAppStatusEnum, QKDAppTypesEnum +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +LOGGER = logging.getLogger(__name__) + +def get_endpoint_name_by_uuid(device, uuid): + for device_endpoint in device.device_endpoints: + if device_endpoint.endpoint_id.endpoint_uuid.uuid == uuid: + return device_endpoint.name + return None + +class QKDServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings + ) -> None: + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(service.service_config, **settings) + + + # Optare: This function is where the service is created + # Optare: It already receives the path provided by pathcomp in endpoints variable + # Optare: It then checks for each respective QKD Node and requests SBI to inform of the new connection + # Optare: It also requests app module for a creation of internal service if the service is virtual + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) < 2 or len(endpoints) % 2: return [] + + LOGGER.info('Endpoints: ' + str(endpoints)) + + + service_uuid = 
self.__service.service_id.service_uuid.uuid + settings = self.__settings_handler.get('/settings') + + context_uuid = self.__service.service_id.context_id.context_uuid.uuid + + results = [] + try: + + if len(endpoints) > 4: + is_virtual = True + else: + is_virtual = False + + devices = [] + qkdn_ids = [] + interfaces = [] + links = [] + + # Optare: First a big iteration through all devices is done in order to obtain all information needed for the whole operation + # Optare: This is a way to minimize time of operation. Otherwise it would require many O(N) operations. This way we can reduce it to one + + # Populate devices and QKDN ids + for idx, endpoint in enumerate(endpoints[::2]): + device_uuid, endpoint_left_uuid = get_device_endpoint_uuids(endpoint) + _, endpoint_right_uuid = get_device_endpoint_uuids(endpoints[2 * idx + 1]) + + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + devices.append(device) + interfaces.append([0,0]) + links.append([]) + + + + + + endpoint_left = get_endpoint_name_by_uuid(device, endpoint_left_uuid) if idx > 0 else None + endpoint_right = get_endpoint_name_by_uuid(device, endpoint_right_uuid) if 2 * idx + 2 < len(endpoints) else None + + + for config_rule in device.device_config.config_rules: + resource_key = config_rule.custom.resource_key + + if resource_key == '__node__': + value = json.loads(config_rule.custom.resource_value) + qkdn_ids.append(value['qkdn_id']) + + elif resource_key.startswith('/interface'): + value = json.loads(config_rule.custom.resource_value) + try: + endpoint_str = value['qkdi_att_point']['uuid'] + LOGGER.info("A: " + str(endpoint_str) + "....." + str(endpoint_left) + "....." + str(endpoint_right)) + + if endpoint_str == endpoint_left: + interfaces[idx][0] = value['qkdi_id'] + elif endpoint_str == endpoint_right: + interfaces[idx][1] = value['qkdi_id'] + except KeyError: + pass + + elif resource_key.startswith('/link'): + value = json.loads(config_rule.custom.resource_value) + links[idx].append(( value['uuid'], + (value['src_qkdn_id'], value['src_interface_id']), + (value['dst_qkdn_id'], value['dst_interface_id']) + )) + + + LOGGER.info("IFs: " + str(interfaces)) + LOGGER.info("Links: " + str(links)) + LOGGER.info("context_: " + context_uuid) + + + # Optare: From here now is where the work is really done. It iterates over every device in use for the service (ordered) + + src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) + src_device = devices[0] + src_endpoint = get_endpoint_matching(src_device, src_endpoint_uuid) + + dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[-1]) + dst_device = devices[-1] + dst_endpoint = get_endpoint_matching(dst_device, dst_endpoint_uuid) + + src_qkdn_id = qkdn_ids[0] + dst_qkdn_id = qkdn_ids[-1] + + src_interface_id = interfaces[0][1] + dst_interface_id = interfaces[-1][0] + + service_qkdl_id_src_dst = str(uuid.uuid4()) + service_qkdl_id_dst_src = str(uuid.uuid4()) + + + for idx, device in enumerate(devices): + + # Even though we always create them together. 
There is a chance the admin deletes one of the rules manually + phys_qkdl_id_right = None if idx == (len(devices) - 1) else '' # None == impossible + phys_qkdl_id_left = None if idx == 0 else '' # None == impossible + + for link_uuid, link_src, link_dst in links[idx]: + qkdn_link_src, qkdn_interface_src = link_src + qkdn_link_dst, qkdn_interface_dst = link_dst + + if phys_qkdl_id_right == '' and \ + qkdn_link_src == qkdn_ids[idx] and qkdn_interface_src == interfaces[idx][1] and \ + qkdn_link_dst[idx+1] and qkdn_interface_dst == interfaces[idx+1][0]: + phys_qkdl_id_right = link_uuid + + + if phys_qkdl_id_left == '' and \ + qkdn_link_src == qkdn_ids[idx] and qkdn_interface_src == interfaces[idx][0] and \ + qkdn_link_dst[idx-1] and qkdn_interface_dst == interfaces[idx-1][1]: + phys_qkdl_id_left = link_uuid + + + # Optare: Before adding information to config_rules you have to delete the list first otherwise old content will be called again + del device.device_config.config_rules[:] + + + if phys_qkdl_id_right: + if not is_virtual: + service_qkdl_id_src_dst = phys_qkdl_id_right + + elif phys_qkdl_id_right == '': + qkdl_id_src_dst = str(uuid.uuid4()) if is_virtual else service_qkdl_id_src_dst + + json_config_rule = json_config_rule_set('/link/link[{:s}]'.format(qkdl_id_src_dst), { + 'uuid' : qkdl_id_src_dst, + 'type' : 'DIRECT', + 'src_qkdn_id' : qkdn_ids[idx], + 'src_interface_id' : interfaces[idx][1], + 'dst_qkdn_id' : qkdn_ids[idx+1], + 'dst_interface_id' : interfaces[idx+1][0], + }) + + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + + + if phys_qkdl_id_left: + if not is_virtual: + service_qkdl_id_dst_src = phys_qkdl_id_left + + elif phys_qkdl_id_left == '': + qkdl_id_dst_src = str(uuid.uuid4()) if is_virtual else service_qkdl_id_dst_src + + json_config_rule = json_config_rule_set('/link/link[{:s}]'.format(qkdl_id_dst_src), { + 'uuid' : qkdl_id_dst_src, + 'type' : 'DIRECT', + 'src_qkdn_id' : qkdn_ids[idx], + 'src_interface_id' : interfaces[idx][0], + 'dst_qkdn_id' : qkdn_ids[idx-1], + 'dst_interface_id' : interfaces[idx-1][1], + }) + + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + + + + if is_virtual: + if idx < len(qkdn_ids) - 1: + json_config_rule = json_config_rule_set('/link/link[{:s}]'.format(service_qkdl_id_src_dst), { + 'uuid' : service_qkdl_id_src_dst, + 'type' : 'VIRTUAL', + 'src_qkdn_id' : src_qkdn_id, + 'src_interface_id' : src_interface_id, + 'dst_qkdn_id' : dst_qkdn_id, + 'dst_interface_id' : dst_interface_id, + 'virt_prev_hop' : qkdn_ids[idx-1] if idx > 0 else None, + 'virt_next_hops' : qkdn_ids[idx+1:], + 'virt_bandwidth' : 0, + }) + + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + + if idx > 0: + json_config_rule = json_config_rule_set('/link/link[{:s}]'.format(service_qkdl_id_dst_src), { + 'uuid' : service_qkdl_id_dst_src, + 'type' : 'VIRTUAL', + 'src_qkdn_id' : dst_qkdn_id, + 'src_interface_id' : dst_interface_id, + 'dst_qkdn_id' : src_qkdn_id, + 'dst_interface_id' : src_interface_id, + 'virt_prev_hop' : qkdn_ids[idx+1] if idx < len(qkdn_ids) - 1 else None, + 'virt_next_hops' : qkdn_ids[idx-1::-1], + 'virt_bandwidth' : 0, + }) + + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + + + + json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), { + 'uuid' : service_uuid, + 'qkdl_id_src_dst' : service_qkdl_id_src_dst, + 'qkdl_id_dst_src' : service_qkdl_id_dst_src, + }) + + 
device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device) + + + if is_virtual: + + # Register App + internal_app_src_dst = { + 'app_id': {'context_id': {'context_uuid': {'uuid': context_uuid}}, 'app_uuid': {'uuid': str(uuid.uuid4())}}, + 'app_status': QKDAppStatusEnum.QKDAPPSTATUS_ON, + 'app_type': QKDAppTypesEnum.QKDAPPTYPES_INTERNAL, + 'server_app_id': '', + 'client_app_id': [], + 'backing_qkdl_id': [{'qkdl_uuid': {'uuid': service_qkdl_id_src_dst}}], + 'local_device_id': src_device.device_id, + 'remote_device_id': dst_device.device_id, + } + + self.__task_executor.register_app(App(**internal_app_src_dst)) + + + # Register App + internal_app_dst_src = { + 'app_id': {'context_id': {'context_uuid': {'uuid': context_uuid}}, 'app_uuid': {'uuid': str(uuid.uuid4())}}, + 'app_status': QKDAppStatusEnum.QKDAPPSTATUS_ON, + 'app_type': QKDAppTypesEnum.QKDAPPTYPES_INTERNAL, + 'server_app_id': '', + 'client_app_id': [], + 'backing_qkdl_id': [{'qkdl_uuid': {'uuid': service_qkdl_id_dst_src}}], + 'local_device_id': dst_device.device_id, + 'remote_device_id': src_device.device_id, + } + + self.__task_executor.register_app(App(**internal_app_dst_src)) + + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid))) + results.append(e) + + return results + + # Optare: This will be to delete a service + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints form a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + raise NotImplementedError() + + # Optare: Can be ingored. It's in case if a service is later updated. Not required to proper functioning + + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + raise NotImplementedError() + + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. 
+ Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + raise NotImplementedError() + + def SetConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value + containing the new value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key changes requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + raise NotImplementedError() + + def DeleteConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value containing + possible additionally required values to locate the value + to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key deletions requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + raise NotImplementedError() diff --git a/src/telemetry/.gitlab-ci.yml b/src/telemetry/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..110a6490d20558c6589550be45b6432e500ba9d6 --- /dev/null +++ b/src/telemetry/.gitlab-ci.yml @@ -0,0 +1,203 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build telemetry: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + # This first build tags the builder resulting image to prevent being removed by dangling image removal command + # - docker buildx build -t "${IMAGE_NAME}-backend:${IMAGE_TAG}-builder" --target builder -f ./src/$IMAGE_NAME/backend/Dockerfile . + - docker buildx build -t "${IMAGE_NAME}-frontend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/frontend/Dockerfile . + - docker buildx build -t "${IMAGE_NAME}-backend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/backend/Dockerfile . 
+ - docker tag "${IMAGE_NAME}-frontend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker tag "${IMAGE_NAME}-backend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/.gitlab-ci.yml + - src/$IMAGE_NAME/frontend/**/*.{py,in,yml} + - src/$IMAGE_NAME/frontend/Dockerfile + - src/$IMAGE_NAME/frontend/tests/*.py + - src/$IMAGE_NAME/backend/Dockerfile + - src/$IMAGE_NAME/backend/**/*.{py,in,yml} + - src/$IMAGE_NAME/backend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + +# Apply unit test to the component +unit_test telemetry-backend: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build telemetry + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + # - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi + - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME-backend -d -p 30060:30060 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/backend/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG + - docker ps -a + - sleep 5 + - docker logs ${IMAGE_NAME}-backend + - > + docker exec -i ${IMAGE_NAME}-backend bash -c + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-backend_report.xml $IMAGE_NAME/backend/tests/test_*.py" + - docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + 
after_script: + - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force + - docker rm -f ${IMAGE_NAME}-backend + - docker rm -f zookeeper + - docker rm -f kafka + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/backend/**/*.{py,in,yml} + - src/$IMAGE_NAME/backend/Dockerfile + - src/$IMAGE_NAME/backend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/backend/tests/${IMAGE_NAME}-backend_report.xml + +# Apply unit test to the component +unit_test telemetry-frontend: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build telemetry + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi + - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - docker pull "cockroachdb/cockroach:latest-v22.2" + - docker volume create crdb + - > + docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 + --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 + --volume "crdb:/cockroach/cockroach-data" + cockroachdb/cockroach:latest-v22.2 start-single-node + - echo "Waiting for initialization..." + - while ! 
docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done + # - docker logs crdb + # - docker ps -a + - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $CRDB_ADDRESS + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 \ + -e ALLOW_ANONYMOUS_LOGIN=yes \ + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + # - docker logs zookeeper + # - docker logs kafka + - > + docker run --name $IMAGE_NAME-frontend -d -p 30050:30050 + --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/frontend/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG + - docker ps -a + - sleep 5 + - docker logs ${IMAGE_NAME}-frontend + - > + docker exec -i ${IMAGE_NAME}-frontend bash -c + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_*.py" + - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + - docker volume rm -f crdb + - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force + - docker rm -f ${IMAGE_NAME}-frontend + - docker rm -f zookeeper + - docker rm -f kafka + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/frontend/**/*.{py,in,yml} + - src/$IMAGE_NAME/frontend/Dockerfile + - src/$IMAGE_NAME/frontend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml \ No newline at end of file diff --git a/src/telemetry/backend/Dockerfile b/src/telemetry/backend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..eebfe24ab3ca457b9d05b02a07f4b28d6f196987 --- /dev/null +++ b/src/telemetry/backend/Dockerfile @@ -0,0 +1,69 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
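+
+# Illustrative local usage (an assumption, not part of the CI jobs above): the backend image
+# can be built and run against a reachable Kafka broker roughly as follows:
+#   docker build -t telemetry-backend:latest -f ./src/telemetry/backend/Dockerfile .
+#   docker run --env KFK_SERVER_ADDRESS=<kafka-host>:9092 telemetry-backend:latest
+# KFK_SERVER_ADDRESS mirrors the variable exported to the container in the unit_test jobs.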
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/telemetry/backend +WORKDIR /var/teraflow/telemetry/backend +COPY src/telemetry/backend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/telemetry/__init__.py telemetry/__init__.py +COPY src/telemetry/backend/. telemetry/backend/ + +# Start the service +ENTRYPOINT ["python", "-m", "telemetry.backend.service"] diff --git a/src/telemetry/backend/requirements.in b/src/telemetry/backend/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..e6a559be714faa31196206dbbdc53788506369b5 --- /dev/null +++ b/src/telemetry/backend/requirements.in @@ -0,0 +1,15 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +confluent-kafka==2.3.* diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index d81be79dbe410ccbf2781816f34735f6bfe5639d..6ab841238f446a2895cd163fab4b7eb05eaa3176 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -12,64 +12,52 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import ast +import json import time import random import logging -import requests import threading -from typing import Any, Tuple -from common.proto.context_pb2 import Empty +from typing import Any, Dict +# from common.proto.context_pb2 import Empty from confluent_kafka import Producer as KafkaProducer from confluent_kafka import Consumer as KafkaConsumer -from confluent_kafka import KafkaException from confluent_kafka import KafkaError -from confluent_kafka.admin import AdminClient, NewTopic -from common.proto.telemetry_frontend_pb2 import Collector, CollectorId -from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +from common.method_wrappers.Decorator import MetricsPool +from common.tools.service.GenericGrpcService import GenericGrpcService LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Telemetry', 'TelemetryBackend') -KAFKA_SERVER_IP = '127.0.0.1:9092' -# KAFKA_SERVER_IP = '10.152.183.175:30092' -ADMIN_KAFKA_CLIENT = AdminClient({'bootstrap.servers': KAFKA_SERVER_IP}) -KAFKA_TOPICS = {'request' : 'topic_request', 'response': 'topic_response', - 'raw' : 'topic_raw' , 'labeled' : 'topic_labeled'} -EXPORTER_ENDPOINT = "http://10.152.183.2:9100/metrics" -PRODUCER_CONFIG = {'bootstrap.servers': KAFKA_SERVER_IP,} +METRICS_POOL = MetricsPool('TelemetryBackend', 'backendService') - -class TelemetryBackendService: +class TelemetryBackendService(GenericGrpcService): """ - Class to listens for request on Kafka topic, fetches metrics and produces measured values to another Kafka topic. + Class listens for request on Kafka topic, fetches requested metrics from device. + Produces metrics on both RESPONSE and VALUE kafka topics. """ - - def __init__(self): + def __init__(self, cls_name : str = __name__) -> None: LOGGER.info('Init TelemetryBackendService') + port = get_service_port_grpc(ServiceNameEnum.TELEMETRYBACKEND) + super().__init__(port, cls_name=cls_name) + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'backend', + 'auto.offset.reset' : 'latest'}) self.running_threads = {} - - def run_kafka_listener(self)->bool: - threading.Thread(target=self.kafka_listener).start() - return True - - def kafka_listener(self): + + def install_servicers(self): + threading.Thread(target=self.RequestListener).start() + + def RequestListener(self): """ listener for requests on Kafka topic. 
""" - conusmer_configs = { - 'bootstrap.servers' : KAFKA_SERVER_IP, - 'group.id' : 'backend', - 'auto.offset.reset' : 'latest' - } - # topic_request = "topic_request" - consumerObj = KafkaConsumer(conusmer_configs) - # consumerObj.subscribe([topic_request]) - consumerObj.subscribe([KAFKA_TOPICS['request']]) - + consumer = self.kafka_consumer + consumer.subscribe([KafkaTopic.REQUEST.value]) while True: - receive_msg = consumerObj.poll(2.0) + receive_msg = consumer.poll(2.0) if receive_msg is None: - # print (time.time(), " - Telemetry backend is listening on Kafka Topic: ", KAFKA_TOPICS['request']) # added for debugging purposes continue elif receive_msg.error(): if receive_msg.error().code() == KafkaError._PARTITION_EOF: @@ -77,177 +65,175 @@ class TelemetryBackendService: else: print("Consumer error: {}".format(receive_msg.error())) break - (kpi_id, duration, interval) = ast.literal_eval(receive_msg.value().decode('utf-8')) + + collector = json.loads(receive_msg.value().decode('utf-8')) collector_id = receive_msg.key().decode('utf-8') - if duration == -1 and interval == -1: - self.terminate_collector_backend(collector_id) - # threading.Thread(target=self.terminate_collector_backend, args=(collector_id)) + LOGGER.debug('Recevied Collector: {:} - {:}'.format(collector_id, collector)) + print('Recevied Collector: {:} - {:}'.format(collector_id, collector)) + + if collector['duration'] == -1 and collector['interval'] == -1: + self.TerminateCollectorBackend(collector_id) else: - self.run_initiate_collector_backend(collector_id, kpi_id, duration, interval) + self.RunInitiateCollectorBackend(collector_id, collector) + def TerminateCollectorBackend(self, collector_id): + if collector_id in self.running_threads: + thread, stop_event = self.running_threads[collector_id] + stop_event.set() + thread.join() + print ("Terminating backend (by StopCollector): Collector Id: ", collector_id) + del self.running_threads[collector_id] + self.GenerateCollectorResponse(collector_id, "-1", -1) # Termination confirmation to frontend. + else: + print ('Backend collector {:} not found'.format(collector_id)) - def run_initiate_collector_backend(self, collector_id: str, kpi_id: str, duration: int, interval: int): + def RunInitiateCollectorBackend(self, collector_id: str, collector: str): stop_event = threading.Event() - thread = threading.Thread(target=self.initiate_collector_backend, - args=(collector_id, kpi_id, duration, interval, stop_event)) + thread = threading.Thread(target=self.InitiateCollectorBackend, + args=(collector_id, collector, stop_event)) self.running_threads[collector_id] = (thread, stop_event) thread.start() - def initiate_collector_backend(self, collector_id, kpi_id, duration, interval, stop_event - ): # type: ignore + def InitiateCollectorBackend(self, collector_id, collector, stop_event): """ - Method to receive collector request attribues and initiates collecter backend. + Method receives collector request and initiates collecter backend. """ print("Initiating backend for collector: ", collector_id) start_time = time.time() while not stop_event.is_set(): - if time.time() - start_time >= duration: # condition to terminate backend + if time.time() - start_time >= collector['duration']: # condition to terminate backend print("Execuation duration completed: Terminating backend: Collector Id: ", collector_id, " - ", time.time() - start_time) - self.generate_kafka_response(collector_id, "-1", -1) - # write to Kafka to send the termination confirmation. 
+ self.GenerateCollectorResponse(collector_id, "-1", -1) # Termination confirmation to frontend. break - # print ("Received KPI: ", kpi_id, ", Duration: ", duration, ", Fetch Interval: ", interval) - self.extract_kpi_value(collector_id, kpi_id) - # print ("Telemetry Backend running for KPI: ", kpi_id, "after FETCH INTERVAL: ", interval) - time.sleep(interval) + self.ExtractKpiValue(collector_id, collector['kpi_id']) + time.sleep(collector['interval']) - def extract_kpi_value(self, collector_id: str, kpi_id: str): + def ExtractKpiValue(self, collector_id: str, kpi_id: str): """ Method to extract kpi value. """ - measured_kpi_value = random.randint(1,100) # Should be extracted from exporter/stream - # measured_kpi_value = self.fetch_node_exporter_metrics() # exporter extracted metric value against default KPI - self.generate_kafka_response(collector_id, kpi_id , measured_kpi_value) + measured_kpi_value = random.randint(1,100) # TODO: To be extracted from a device + print ("Measured Kpi value: {:}".format(measured_kpi_value)) + # measured_kpi_value = self.fetch_node_exporter_metrics() # exporter extracted metric value against default KPI + self.GenerateCollectorResponse(collector_id, kpi_id , measured_kpi_value) - def generate_kafka_response(self, collector_id: str, kpi_id: str, kpi_value: Any): + def GenerateCollectorResponse(self, collector_id: str, kpi_id: str, measured_kpi_value: Any): """ - Method to write response on Kafka topic + Method to write kpi value on RESPONSE Kafka topic """ - # topic_response = "topic_response" - msg_value : Tuple [str, Any] = (kpi_id, kpi_value) - msg_key = collector_id - producerObj = KafkaProducer(PRODUCER_CONFIG) - # producerObj.produce(topic_response, key=msg_key, value= str(msg_value), callback=self.delivery_callback) - producerObj.produce(KAFKA_TOPICS['response'], key=msg_key, value= str(msg_value), callback=TelemetryBackendService.delivery_callback) - producerObj.flush() - - def terminate_collector_backend(self, collector_id): - if collector_id in self.running_threads: - thread, stop_event = self.running_threads[collector_id] - stop_event.set() - thread.join() - print ("Terminating backend (by StopCollector): Collector Id: ", collector_id) - del self.running_threads[collector_id] - self.generate_kafka_response(collector_id, "-1", -1) + producer = self.kafka_producer + kpi_value : Dict = { + "kpi_id" : kpi_id, + "kpi_value" : measured_kpi_value + } + producer.produce( + KafkaTopic.RESPONSE.value, + key = collector_id, + value = json.dumps(kpi_value), + callback = self.delivery_callback + ) + producer.flush() - def create_topic_if_not_exists(self, new_topics: list) -> bool: - """ - Method to create Kafka topic if it does not exist. - Args: - admin_client (AdminClient): Kafka admin client. + def GenerateRawMetric(self, metrics: Any): """ - for topic in new_topics: - try: - topic_metadata = ADMIN_KAFKA_CLIENT.list_topics(timeout=5) - if topic not in topic_metadata.topics: - # If the topic does not exist, create a new topic - print(f"Topic '{topic}' does not exist. Creating...") - LOGGER.warning("Topic {:} does not exist. Creating...".format(topic)) - new_topic = NewTopic(topic, num_partitions=1, replication_factor=1) - ADMIN_KAFKA_CLIENT.create_topics([new_topic]) - except KafkaException as e: - print(f"Failed to create topic: {e}") - return False - return True - - @staticmethod - def delivery_callback( err, msg): - """ - Callback function to handle message delivery status. - Args: - err (KafkaError): Kafka error object. 
- msg (Message): Kafka message object. + Method writes raw metrics on VALUE Kafka topic """ - if err: - print(f'Message delivery failed: {err}') - else: - print(f'Message delivered to topic {msg.topic()}') + producer = self.kafka_producer + some_metric : Dict = { + "some_id" : metrics + } + producer.produce( + KafkaTopic.VALUE.value, + key = 'raw', + value = json.dumps(some_metric), + callback = self.delivery_callback + ) + producer.flush() -# ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter ----------- - @staticmethod - def fetch_single_node_exporter_metric(): + def delivery_callback(self, err, msg): """ - Method to fetch metrics from Node Exporter. - Returns: - str: Metrics fetched from Node Exporter. - """ - KPI = "node_network_receive_packets_total" - try: - response = requests.get(EXPORTER_ENDPOINT) # type: ignore - LOGGER.info("Request status {:}".format(response)) - if response.status_code == 200: - # print(f"Metrics fetched sucessfully...") - metrics = response.text - # Check if the desired metric is available in the response - if KPI in metrics: - KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI) - # Extract the metric value - if KPI_VALUE is not None: - LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE)) - print(f"Extracted value of {KPI} is: {KPI_VALUE}") - return KPI_VALUE - else: - LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code)) - # print(f"Failed to fetch metrics. Status code: {response.status_code}") - return None - except Exception as e: - LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e)) - # print(f"Failed to fetch metrics: {str(e)}") - return None - - @staticmethod - def extract_metric_value(metrics, metric_name): - """ - Method to extract the value of a metric from the metrics string. - Args: - metrics (str): Metrics string fetched from Exporter. - metric_name (str): Name of the metric to extract. - Returns: - float: Value of the extracted metric, or None if not found. - """ - try: - # Find the metric line containing the desired metric name - metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name)) - # Split the line to extract the metric value - metric_value = float(metric_line.split()[1]) - return metric_value - except StopIteration: - print(f"Metric '{metric_name}' not found in the metrics.") - return None - - @staticmethod - def stream_node_export_metrics_to_raw_topic(): - try: - while True: - response = requests.get(EXPORTER_ENDPOINT) - # print("Response Status {:} ".format(response)) - # LOGGER.info("Response Status {:} ".format(response)) - try: - if response.status_code == 200: - producerObj = KafkaProducer(PRODUCER_CONFIG) - producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback) - producerObj.flush() - LOGGER.info("Produce to topic") - else: - LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code)) - print(f"Didn't received expected response. Status code: {response.status_code}") - return None - time.sleep(15) - except Exception as e: - LOGGER.info("Failed to process response. Status code: {:}".format(e)) - return None - except Exception as e: - LOGGER.info("Failed to fetch metrics. 
Status code: {:}".format(e)) - print(f"Failed to fetch metrics: {str(e)}") - return None -# ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter ----------- \ No newline at end of file + Callback function to handle message delivery status. + Args: err (KafkaError): Kafka error object. + msg (Message): Kafka message object. + """ + if err: print(f'Message delivery failed: {err}') + # else: print(f'Message delivered to topic {msg.topic()}') + +# # ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter ----------- +# @staticmethod +# def fetch_single_node_exporter_metric(): +# """ +# Method to fetch metrics from Node Exporter. +# Returns: +# str: Metrics fetched from Node Exporter. +# """ +# KPI = "node_network_receive_packets_total" +# try: +# response = requests.get(EXPORTER_ENDPOINT) # type: ignore +# LOGGER.info("Request status {:}".format(response)) +# if response.status_code == 200: +# # print(f"Metrics fetched sucessfully...") +# metrics = response.text +# # Check if the desired metric is available in the response +# if KPI in metrics: +# KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI) +# # Extract the metric value +# if KPI_VALUE is not None: +# LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE)) +# print(f"Extracted value of {KPI} is: {KPI_VALUE}") +# return KPI_VALUE +# else: +# LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code)) +# # print(f"Failed to fetch metrics. Status code: {response.status_code}") +# return None +# except Exception as e: +# LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e)) +# # print(f"Failed to fetch metrics: {str(e)}") +# return None + +# @staticmethod +# def extract_metric_value(metrics, metric_name): +# """ +# Method to extract the value of a metric from the metrics string. +# Args: +# metrics (str): Metrics string fetched from Exporter. +# metric_name (str): Name of the metric to extract. +# Returns: +# float: Value of the extracted metric, or None if not found. +# """ +# try: +# # Find the metric line containing the desired metric name +# metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name)) +# # Split the line to extract the metric value +# metric_value = float(metric_line.split()[1]) +# return metric_value +# except StopIteration: +# print(f"Metric '{metric_name}' not found in the metrics.") +# return None + +# @staticmethod +# def stream_node_export_metrics_to_raw_topic(): +# try: +# while True: +# response = requests.get(EXPORTER_ENDPOINT) +# # print("Response Status {:} ".format(response)) +# # LOGGER.info("Response Status {:} ".format(response)) +# try: +# if response.status_code == 200: +# producerObj = KafkaProducer(PRODUCER_CONFIG) +# producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback) +# producerObj.flush() +# LOGGER.info("Produce to topic") +# else: +# LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code)) +# print(f"Didn't received expected response. Status code: {response.status_code}") +# return None +# time.sleep(15) +# except Exception as e: +# LOGGER.info("Failed to process response. Status code: {:}".format(e)) +# return None +# except Exception as e: +# LOGGER.info("Failed to fetch metrics. 
Status code: {:}".format(e)) +# print(f"Failed to fetch metrics: {str(e)}") +# return None +# # ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter ----------- \ No newline at end of file diff --git a/src/telemetry/backend/service/__main__.py b/src/telemetry/backend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ec9e191fd22e07da46f80214ade0ac516032433 --- /dev/null +++ b/src/telemetry/backend/service/__main__.py @@ -0,0 +1,56 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port +from .TelemetryBackendService import TelemetryBackendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + grpc_service = TelemetryBackendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/telemetry/backend/tests/testTelemetryBackend.py b/src/telemetry/backend/tests/testTelemetryBackend.py deleted file mode 100644 index d832e54e77589ca677682760d19e68b1bd09b1f7..0000000000000000000000000000000000000000 --- a/src/telemetry/backend/tests/testTelemetryBackend.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys -print (sys.path) -sys.path.append('/home/tfs/tfs-ctrl') -import threading -import logging -from typing import Tuple -# from common.proto.context_pb2 import Empty -from src.telemetry.backend.service.TelemetryBackendService import TelemetryBackendService - -LOGGER = logging.getLogger(__name__) - - -########################### -# Tests Implementation of Telemetry Backend -########################### - -def test_verify_kafka_topics(): - LOGGER.info('test_verify_kafka_topics requesting') - TelemetryBackendServiceObj = TelemetryBackendService() - KafkaTopics = ['topic_request', 'topic_response', 'topic_raw', 'topic_labled'] - response = TelemetryBackendServiceObj.create_topic_if_not_exists(KafkaTopics) - LOGGER.debug(str(response)) - assert isinstance(response, bool) - -# def test_run_kafka_listener(): -# LOGGER.info('test_receive_kafka_request requesting') -# TelemetryBackendServiceObj = TelemetryBackendService() -# response = TelemetryBackendServiceObj.run_kafka_listener() -# LOGGER.debug(str(response)) -# assert isinstance(response, bool) - -# def test_fetch_node_exporter_metrics(): -# LOGGER.info(' >>> test_fetch_node_exporter_metrics START <<< ') -# TelemetryBackendService.fetch_single_node_exporter_metric() - -def test_stream_node_export_metrics_to_raw_topic(): - LOGGER.info(' >>> test_stream_node_export_metrics_to_raw_topic START <<< ') - threading.Thread(target=TelemetryBackendService.stream_node_export_metrics_to_raw_topic, args=()).start() - diff --git a/src/telemetry/backend/tests/test_TelemetryBackend.py b/src/telemetry/backend/tests/test_TelemetryBackend.py new file mode 100644 index 0000000000000000000000000000000000000000..a2bbee540c3ce348ef52eceb0e776f48a68d94b1 --- /dev/null +++ b/src/telemetry/backend/tests/test_TelemetryBackend.py @@ -0,0 +1,38 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
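+
+# Note (assumption, mirroring the CI job above): these tests expect a reachable Kafka broker,
+# e.g. one exposed through the KFK_SERVER_ADDRESS environment variable set for the container,
+# and test_validate_kafka_topics is meant to run first so that the required topics exist.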
+ +import logging +from common.tools.kafka.Variables import KafkaTopic +from telemetry.backend.service.TelemetryBackendService import TelemetryBackendService + + +LOGGER = logging.getLogger(__name__) + + +########################### +# Tests Implementation of Telemetry Backend +########################### + +# --- "test_validate_kafka_topics" should be run before the functionality tests --- +def test_validate_kafka_topics(): + LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") + response = KafkaTopic.create_all_topics() + assert isinstance(response, bool) + +def test_RunRequestListener(): + LOGGER.info('test_RunRequestListener') + TelemetryBackendServiceObj = TelemetryBackendService() + response = TelemetryBackendServiceObj.RunRequestListener() + LOGGER.debug(str(response)) + assert isinstance(response, bool) diff --git a/src/telemetry/database/TelemetryDBmanager.py b/src/telemetry/database/TelemetryDBmanager.py deleted file mode 100644 index b558180a9e1fbf85bf523c7faededf58f57e2264..0000000000000000000000000000000000000000 --- a/src/telemetry/database/TelemetryDBmanager.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, time -import sqlalchemy -from sqlalchemy import inspect, MetaData, Table -from sqlalchemy.orm import sessionmaker -from telemetry.database.TelemetryModel import Collector as CollectorModel -from telemetry.database.TelemetryModel import Kpi as KpiModel -from sqlalchemy.ext.declarative import declarative_base -from telemetry.database.TelemetryEngine import TelemetryEngine -from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId -from common.proto.telemetry_frontend_pb2 import Collector, CollectorId -from sqlalchemy.exc import SQLAlchemyError -from telemetry.database.TelemetryModel import Base - -LOGGER = logging.getLogger(__name__) -DB_NAME = "telemetryfrontend" - -class TelemetryDBmanager: - def __init__(self): - self.db_engine = TelemetryEngine.get_engine() - if self.db_engine is None: - LOGGER.error('Unable to get SQLAlchemy DB Engine...') - return False - self.db_name = DB_NAME - self.Session = sessionmaker(bind=self.db_engine) - - def create_database(self): - try: - # with self.db_engine.connect() as connection: - # connection.execute(f"CREATE DATABASE {self.db_name};") - TelemetryEngine.create_database(self.db_engine) - LOGGER.info('TelemetryDBmanager initalized DB Name: {:}'.format(self.db_name)) - return True - except Exception as e: # pylint: disable=bare-except # pragma: no cover - LOGGER.exception('Failed to check/create the database: {:s}'.format(str(e))) - return False - - def create_tables(self): - try: - Base.metadata.create_all(self.db_engine) # type: ignore - LOGGER.info("Tables created in database ({:}) the as per Models".format(self.db_name)) - except Exception as e: - LOGGER.info("Tables cannot be created in the TelemetryFrontend database. 
{:s}".format(str(e))) - - def verify_tables(self): - try: - with self.db_engine.connect() as connection: - result = connection.execute("SHOW TABLES;") - tables = result.fetchall() - LOGGER.info("Tables in DB: {:}".format(tables)) - except Exception as e: - LOGGER.info("Unable to fetch Table names. {:s}".format(str(e))) - - def drop_table(self, table_to_drop: str): - try: - inspector = inspect(self.db_engine) - existing_tables = inspector.get_table_names() - if table_to_drop in existing_tables: - table = Table(table_to_drop, MetaData(), autoload_with=self.db_engine) - table.drop(self.db_engine) - LOGGER.info("Tables delete in the DB Name: {:}".format(self.db_name)) - else: - LOGGER.warning("No table {:} in database {:} ".format(table_to_drop, DB_NAME)) - except Exception as e: - LOGGER.info("Tables cannot be deleted in the {:} database. {:s}".format(DB_NAME, str(e))) - - def list_databases(self): - query = "SHOW DATABASES" - with self.db_engine.connect() as connection: - result = connection.execute(query) - databases = [row[0] for row in result] - LOGGER.info("List of available DBs: {:}".format(databases)) - -# ------------------ INSERT METHODs -------------------------------------- - - def inser_kpi(self, request: KpiDescriptor): - session = self.Session() - try: - # Create a new Kpi instance - kpi_to_insert = KpiModel() - kpi_to_insert.kpi_id = request.kpi_id.kpi_id.uuid - kpi_to_insert.kpi_description = request.kpi_description - kpi_to_insert.kpi_sample_type = request.kpi_sample_type - kpi_to_insert.device_id = request.service_id.service_uuid.uuid - kpi_to_insert.endpoint_id = request.device_id.device_uuid.uuid - kpi_to_insert.service_id = request.slice_id.slice_uuid.uuid - kpi_to_insert.slice_id = request.endpoint_id.endpoint_uuid.uuid - kpi_to_insert.connection_id = request.connection_id.connection_uuid.uuid - # kpi_to_insert.link_id = request.link_id.link_id.uuid - # Add the instance to the session - session.add(kpi_to_insert) - session.commit() - LOGGER.info("Row inserted into kpi table: {:}".format(kpi_to_insert.kpi_id)) - except Exception as e: - session.rollback() - LOGGER.info("Failed to insert new kpi. {:s}".format(str(e))) - finally: - # Close the session - session.close() - - # Function to insert a row into the Collector model - def insert_collector(self, request: Collector): - session = self.Session() - try: - # Create a new Collector instance - collector_to_insert = CollectorModel() - collector_to_insert.collector_id = request.collector_id.collector_id.uuid - collector_to_insert.kpi_id = request.kpi_id.kpi_id.uuid - collector_to_insert.collector = "Test collector description" - collector_to_insert.sampling_duration_s = request.duration_s - collector_to_insert.sampling_interval_s = request.interval_s - collector_to_insert.start_timestamp = time.time() - collector_to_insert.end_timestamp = time.time() - - session.add(collector_to_insert) - session.commit() - LOGGER.info("Row inserted into collector table: {:}".format(collector_to_insert.collector_id)) - except Exception as e: - session.rollback() - LOGGER.info("Failed to insert new collector. 
{:s}".format(str(e))) - finally: - # Close the session - session.close() - -# ------------------ GET METHODs -------------------------------------- - - def get_kpi_descriptor(self, request: KpiId): - session = self.Session() - try: - kpi_id_to_search = request.kpi_id.uuid - kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_search).first() - if kpi: - LOGGER.info("kpi ID found: {:s}".format(str(kpi))) - return kpi - else: - LOGGER.warning("Kpi ID not found {:s}".format(str(kpi_id_to_search))) - return None - except Exception as e: - session.rollback() - LOGGER.info("Failed to retrieve KPI ID. {:s}".format(str(e))) - raise - finally: - session.close() - - def get_collector(self, request: CollectorId): - session = self.Session() - try: - collector_id_to_search = request.collector_id.uuid - collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_search).first() - if collector: - LOGGER.info("collector ID found: {:s}".format(str(collector))) - return collector - else: - LOGGER.warning("collector ID not found{:s}".format(str(collector_id_to_search))) - return None - except Exception as e: - session.rollback() - LOGGER.info("Failed to retrieve collector ID. {:s}".format(str(e))) - raise - finally: - session.close() - - # ------------------ SELECT METHODs -------------------------------------- - - def select_kpi_descriptor(self, **filters): - session = self.Session() - try: - query = session.query(KpiModel) - for column, value in filters.items(): - query = query.filter(getattr(KpiModel, column) == value) - result = query.all() - if len(result) != 0: - LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result))) - else: - LOGGER.warning("No matching row found : {:s}".format(str(result))) - return result - except SQLAlchemyError as e: - LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e)) - return [] - finally: - session.close() - - def select_collector(self, **filters): - session = self.Session() - try: - query = session.query(CollectorModel) - for column, value in filters.items(): - query = query.filter(getattr(CollectorModel, column) == value) - result = query.all() - if len(result) != 0: - LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result))) - else: - LOGGER.warning("No matching row found : {:s}".format(str(result))) - return result - except SQLAlchemyError as e: - LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e)) - return [] - finally: - session.close() - -# ------------------ DELETE METHODs -------------------------------------- - - def delete_kpi_descriptor(self, request: KpiId): - session = self.Session() - try: - kpi_id_to_delete = request.kpi_id.uuid - kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_delete).first() - if kpi: - session.delete(kpi) - session.commit() - LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id_to_delete) - else: - LOGGER.warning("KPI with kpi_id %s not found", kpi_id_to_delete) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id_to_delete, e) - finally: - session.close() - - def delete_collector(self, request: CollectorId): - session = self.Session() - try: - collector_id_to_delete = request.collector_id.uuid - collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_delete).first() - if collector: - session.delete(collector) - session.commit() - LOGGER.info("Deleted 
collector with collector_id: %s", collector_id_to_delete) - else: - LOGGER.warning("collector with collector_id %s not found", collector_id_to_delete) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error deleting collector with collector_id %s: %s", collector_id_to_delete, e) - finally: - session.close() \ No newline at end of file diff --git a/src/telemetry/database/TelemetryEngine.py b/src/telemetry/database/TelemetryEngine.py index a563fa09f94c812aed07d0aa3cbd5bc988737fc4..7c8620faf25e695e7f971bce78be9ad208a7701b 100644 --- a/src/telemetry/database/TelemetryEngine.py +++ b/src/telemetry/database/TelemetryEngine.py @@ -12,48 +12,29 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, sqlalchemy, sqlalchemy_utils -# from common.Settings import get_setting +import logging, sqlalchemy +from common.Settings import get_setting LOGGER = logging.getLogger(__name__) - -APP_NAME = 'tfs' -ECHO = False # False: No dump SQL commands and transactions executed -CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}' -# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' +CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' class TelemetryEngine: - # def __init__(self): - # self.engine = self.get_engine() @staticmethod def get_engine() -> sqlalchemy.engine.Engine: - CRDB_NAMESPACE = "crdb" - CRDB_SQL_PORT = "26257" - CRDB_DATABASE = "telemetryfrontend" - CRDB_USERNAME = "tfs" - CRDB_PASSWORD = "tfs123" - CRDB_SSLMODE = "require" - crdb_uri = CRDB_URI_TEMPLATE.format( - CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) - # crdb_uri = CRDB_URI_TEMPLATE.format( - # CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) + crdb_uri = get_setting('CRDB_URI', default=None) + if crdb_uri is None: + CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE') + CRDB_SQL_PORT = get_setting('CRDB_SQL_PORT') + CRDB_DATABASE = "tfs-telemetry" # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT') + CRDB_USERNAME = get_setting('CRDB_USERNAME') + CRDB_PASSWORD = get_setting('CRDB_PASSWORD') + CRDB_SSLMODE = get_setting('CRDB_SSLMODE') + crdb_uri = CRDB_URI_TEMPLATE.format( + CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) try: - # engine = sqlalchemy.create_engine( - # crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True) engine = sqlalchemy.create_engine(crdb_uri, echo=False) - LOGGER.info(' TelemetryDBmanager initalized with DB URL: {:}'.format(crdb_uri)) + LOGGER.info(' TelemetryDB initalized with DB URL: {:}'.format(crdb_uri)) except: # pylint: disable=bare-except # pragma: no cover LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) return None # type: ignore - return engine # type: ignore - - @staticmethod - def create_database(engine : sqlalchemy.engine.Engine) -> None: - if not sqlalchemy_utils.database_exists(engine.url): - LOGGER.info("Database created. 
{:}".format(engine.url)) - sqlalchemy_utils.create_database(engine.url) - - @staticmethod - def drop_database(engine : sqlalchemy.engine.Engine) -> None: - if sqlalchemy_utils.database_exists(engine.url): - sqlalchemy_utils.drop_database(engine.url) + return engine diff --git a/src/telemetry/database/TelemetryModel.py b/src/telemetry/database/TelemetryModel.py index be4f0969c86638520cf226b8e42db90426165804..4e71ce8138af39e51c80791dbd6683d855231d7b 100644 --- a/src/telemetry/database/TelemetryModel.py +++ b/src/telemetry/database/TelemetryModel.py @@ -14,32 +14,60 @@ import logging from sqlalchemy.dialects.postgresql import UUID -from sqlalchemy import Column, Integer, String, Float, Text, ForeignKey -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker, relationship +from sqlalchemy import Column, String, Float from sqlalchemy.orm import registry +from common.proto import telemetry_frontend_pb2 logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) # Create a base class for declarative models Base = registry().generate_base() -# Base = declarative_base() class Collector(Base): __tablename__ = 'collector' collector_id = Column(UUID(as_uuid=False), primary_key=True) - kpi_id = Column(UUID(as_uuid=False)) - collector_decription = Column(String) - sampling_duration_s = Column(Float) - sampling_interval_s = Column(Float) - start_timestamp = Column(Float) - end_timestamp = Column(Float) - + kpi_id = Column(UUID(as_uuid=False), nullable=False) + sampling_duration_s = Column(Float , nullable=False) + sampling_interval_s = Column(Float , nullable=False) + start_timestamp = Column(Float , nullable=False) + end_timestamp = Column(Float , nullable=False) + # helps in logging the information def __repr__(self): - return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', " - f"collector='{self.collector_decription}', sampling_duration_s='{self.sampling_duration_s}', " - f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', " - f"end_timestamp='{self.end_timestamp}')>") \ No newline at end of file + return (f"<Collector(collector_id='{self.collector_id}' , kpi_id='{self.kpi_id}', " + f"sampling_duration_s='{self.sampling_duration_s}', sampling_interval_s='{self.sampling_interval_s}'," + f"start_timestamp='{self.start_timestamp}' , end_timestamp='{self.end_timestamp}')>") + + @classmethod + def ConvertCollectorToRow(cls, request): + """ + Create an instance of Collector table rows from a request object. + Args: request: The request object containing collector gRPC message. + Returns: A row (an instance of Collector table) initialized with content of the request. + """ + return cls( + collector_id = request.collector_id.collector_id.uuid, + kpi_id = request.kpi_id.kpi_id.uuid, + sampling_duration_s = request.duration_s, + sampling_interval_s = request.interval_s, + start_timestamp = request.start_time.timestamp, + end_timestamp = request.end_time.timestamp + ) + + @classmethod + def ConvertRowToCollector(cls, row): + """ + Create and return a dictionary representation of a Collector table instance. + Args: row: The Collector table instance (row) containing the data. + Returns: collector gRPC message initialized with the content of a row. 
+        """
+        response = telemetry_frontend_pb2.Collector()
+        response.collector_id.collector_id.uuid = row.collector_id
+        response.kpi_id.kpi_id.uuid             = row.kpi_id
+        response.duration_s                     = row.sampling_duration_s
+        response.interval_s                     = row.sampling_interval_s
+        response.start_time.timestamp           = row.start_timestamp
+        response.end_time.timestamp             = row.end_timestamp
+        return response
diff --git a/src/telemetry/database/Telemetry_DB.py b/src/telemetry/database/Telemetry_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..32acfd73a410a7bfddd6b487d0b1962afadb3842
--- /dev/null
+++ b/src/telemetry/database/Telemetry_DB.py
@@ -0,0 +1,137 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sqlalchemy_utils
+from sqlalchemy import inspect
+from sqlalchemy.orm import sessionmaker
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from common.method_wrappers.ServiceExceptions import (
+    OperationFailedException, AlreadyExistsException )
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "tfs_telemetry"
+
+class TelemetryDB:
+    def __init__(self):
+        self.db_engine = TelemetryEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            return False
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self):
+        if not sqlalchemy_utils.database_exists(self.db_engine.url):
+            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
+            sqlalchemy_utils.create_database(self.db_engine.url)
+
+    def drop_database(self) -> None:
+        if sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.drop_database(self.db_engine.url)
+
+    def create_tables(self):
+        try:
+            CollectorModel.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.debug("Tables created in the database: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e)))
+            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
+
+    def verify_tables(self):
+        try:
+            inspect_object = inspect(self.db_engine)
+            if(inspect_object.has_table('collector', None)):
+                LOGGER.info("Table exists in DB: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+# ----------------- CRUD METHODs ---------------------
+
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
+            return True
+        except Exception as e:
+            session.rollback()
+            if "psycopg2.errors.UniqueViolation" in str(e):
+                LOGGER.error(f"Unique key violation: {row.__class__.__name__} table. {str(e)}")
+                raise AlreadyExistsException(row.__class__.__name__, row,
+                                             extra_details=["Unique key violation: {:}".format(e)] )
+            else:
+                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+                raise OperationFailedException ("Insertion failed", extra_details=["unable to insert row {:}".format(e)])
+        finally:
+            session.close()
+
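+    # Illustrative usage sketch (not part of this class): a caller such as the telemetry
+    # frontend would typically map the gRPC Collector message to a row and use the CRUD
+    # helpers defined here, e.g.:
+    #     db  = TelemetryDB()
+    #     row = CollectorModel.ConvertCollectorToRow(request)   # 'request' is a Collector gRPC message
+    #     db.add_row_to_db(row)
+    #     db.search_db_row_by_id(CollectorModel, 'collector_id', row.collector_id)
+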
{str(e)}") + raise AlreadyExistsException(row.__class__.__name__, row, + extra_details=["Unique key voilation: {:}".format(e)] ) + else: + LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def search_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + entity = session.query(model).filter_by(**{col_name: id_to_search}).first() + if entity: + # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}") + return entity + else: + LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}") + print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search)) + return None + except Exception as e: + session.rollback() + LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}") + raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)]) + finally: + session.close() + + def delete_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + record = session.query(model).filter_by(**{col_name: id_to_search}).first() + if record: + session.delete(record) + session.commit() + LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search) + else: + LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search) + return None + except Exception as e: + session.rollback() + LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e) + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def select_with_filter(self, model, filter_object): + session = self.Session() + try: + query = session.query(CollectorModel) + # Apply filters based on the filter_object + if filter_object.kpi_id: + query = query.filter(CollectorModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id])) + result = query.all() + # query should be added to return all rows + if result: + LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") # - Results: {result} + else: + LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}") + return result + except Exception as e: + LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}") + raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)]) + finally: + session.close() + diff --git a/src/telemetry/database/managementDB.py b/src/telemetry/database/managementDB.py deleted file mode 100644 index f79126f279d7bbece6c08ae5eb1cd74e340d1c7d..0000000000000000000000000000000000000000 --- a/src/telemetry/database/managementDB.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, time -import sqlalchemy -import sqlalchemy_utils -from sqlalchemy.orm import sessionmaker -from sqlalchemy.ext.declarative import declarative_base -from telemetry.database.TelemetryEngine import TelemetryEngine -from telemetry.database.TelemetryModel import Base - -LOGGER = logging.getLogger(__name__) -DB_NAME = "telemetryfrontend" - -# # Create a base class for declarative models -# Base = declarative_base() - -class managementDB: - def __init__(self): - self.db_engine = TelemetryEngine.get_engine() - if self.db_engine is None: - LOGGER.error('Unable to get SQLAlchemy DB Engine...') - return False - self.db_name = DB_NAME - self.Session = sessionmaker(bind=self.db_engine) - - @staticmethod - def create_database(engine : sqlalchemy.engine.Engine) -> None: - if not sqlalchemy_utils.database_exists(engine.url): - LOGGER.info("Database created. {:}".format(engine.url)) - sqlalchemy_utils.create_database(engine.url) - - @staticmethod - def drop_database(engine : sqlalchemy.engine.Engine) -> None: - if sqlalchemy_utils.database_exists(engine.url): - sqlalchemy_utils.drop_database(engine.url) - - # def create_database(self): - # try: - # with self.db_engine.connect() as connection: - # connection.execute(f"CREATE DATABASE {self.db_name};") - # LOGGER.info('managementDB initalizes database. Name: {self.db_name}') - # return True - # except: - # LOGGER.exception('Failed to check/create the database: {:s}'.format(str(self.db_engine.url))) - # return False - - @staticmethod - def create_tables(engine : sqlalchemy.engine.Engine): - try: - Base.metadata.create_all(engine) # type: ignore - LOGGER.info("Tables created in the DB Name: {:}".format(DB_NAME)) - except Exception as e: - LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e))) - - def verify_tables(self): - try: - with self.db_engine.connect() as connection: - result = connection.execute("SHOW TABLES;") - tables = result.fetchall() # type: ignore - LOGGER.info("Tables verified: {:}".format(tables)) - except Exception as e: - LOGGER.info("Unable to fetch Table names. {:s}".format(str(e))) - - @staticmethod - def add_row_to_db(self, row): - session = self.Session() - try: - session.add(row) - session.commit() - LOGGER.info(f"Row inserted into {row.__class__.__name__} table.") - except Exception as e: - session.rollback() - LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") - finally: - session.close() - - def search_db_row_by_id(self, model, col_name, id_to_search): - session = self.Session() - try: - entity = session.query(model).filter_by(**{col_name: id_to_search}).first() - if entity: - LOGGER.info(f"{model.__name__} ID found: {str(entity)}") - return entity - else: - LOGGER.warning(f"{model.__name__} ID not found: {str(id_to_search)}") - return None - except Exception as e: - session.rollback() - LOGGER.info(f"Failed to retrieve {model.__name__} ID. 
{str(e)}") - raise - finally: - session.close() - - def delete_db_row_by_id(self, model, col_name, id_to_search): - session = self.Session() - try: - record = session.query(model).filter_by(**{col_name: id_to_search}).first() - if record: - session.delete(record) - session.commit() - LOGGER.info("Deleted %s with %s: %s", model.__name__, col_name, id_to_search) - else: - LOGGER.warning("%s with %s %s not found", model.__name__, col_name, id_to_search) - except Exception as e: - session.rollback() - LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e) - finally: - session.close() - - def select_with_filter(self, model, **filters): - session = self.Session() - try: - query = session.query(model) - for column, value in filters.items(): - query = query.filter(getattr(model, column) == value) # type: ignore - result = query.all() - if result: - LOGGER.info(f"Fetched filtered rows from {model.__name__} table with filters: {filters}") # - Results: {result} - else: - LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filters}") - return result - except Exception as e: - LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filters} ::: {e}") - return [] - finally: - session.close() \ No newline at end of file diff --git a/src/telemetry/database/tests/telemetryDBtests.py b/src/telemetry/database/tests/telemetryDBtests.py deleted file mode 100644 index 0d221106419d6e4ee4b313adf10c90c5e6be7666..0000000000000000000000000000000000000000 --- a/src/telemetry/database/tests/telemetryDBtests.py +++ /dev/null @@ -1,86 +0,0 @@ - -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import Any -from sqlalchemy.ext.declarative import declarative_base -from telemetry.database.TelemetryDBmanager import TelemetryDBmanager -from telemetry.database.TelemetryEngine import TelemetryEngine -from telemetry.database.tests import temp_DB -from .messages import create_kpi_request, create_collector_request, \ - create_kpi_id_request, create_kpi_filter_request, \ - create_collector_id_request, create_collector_filter_request - -logging.basicConfig(level=logging.INFO) -LOGGER = logging.getLogger(__name__) - - -# def test_temp_DB(): -# temp_DB.main() - -def test_telemetry_object_creation(): - LOGGER.info('--- test_telemetry_object_creation: START') - - LOGGER.info('>>> Creating TelemetryDBmanager Object <<< ') - TelemetryDBmanagerObj = TelemetryDBmanager() - TelemetryEngine.create_database(TelemetryDBmanagerObj.db_engine) # creates 'frontend' db, if it doesnot exists. 
- - LOGGER.info('>>> Creating database <<< ') - TelemetryDBmanagerObj.create_database() - - LOGGER.info('>>> verifing database <<< ') - TelemetryDBmanagerObj.list_databases() - - # # LOGGER.info('>>> Droping Tables: ') - # # TelemetryDBmanagerObj.drop_table("table_naem_here") - - LOGGER.info('>>> Creating Tables <<< ') - TelemetryDBmanagerObj.create_tables() - - LOGGER.info('>>> Verifing Table creation <<< ') - TelemetryDBmanagerObj.verify_tables() - - # LOGGER.info('>>> TESTING: Row Insertion Operation: kpi Table <<<') - # kpi_obj = create_kpi_request() - # TelemetryDBmanagerObj.inser_kpi(kpi_obj) - - # LOGGER.info('>>> TESTING: Row Insertion Operation: collector Table <<<') - # collector_obj = create_collector_request() - # TelemetryDBmanagerObj.insert_collector(collector_obj) - - # LOGGER.info('>>> TESTING: Get KpiDescriptor <<<') - # kpi_id_obj = create_kpi_id_request() - # TelemetryDBmanagerObj.get_kpi_descriptor(kpi_id_obj) - - # LOGGER.info('>>> TESTING: Select Collector <<<') - # collector_id_obj = create_collector_id_request() - # TelemetryDBmanagerObj.get_collector(collector_id_obj) - - # LOGGER.info('>>> TESTING: Applying kpi filter <<< ') - # kpi_filter : dict[str, Any] = create_kpi_filter_request() - # TelemetryDBmanagerObj.select_kpi_descriptor(**kpi_filter) - - # LOGGER.info('>>> TESTING: Applying collector filter <<<') - # collector_filter : dict[str, Any] = create_collector_filter_request() - # TelemetryDBmanagerObj.select_collector(**collector_filter) - - # LOGGER.info('>>> TESTING: Delete KpiDescriptor ') - # kpi_id_obj = create_kpi_id_request() - # TelemetryDBmanagerObj.delete_kpi_descriptor(kpi_id_obj) - - # LOGGER.info('>>> TESTING: Delete Collector ') - # collector_id_obj = create_collector_id_request() - # TelemetryDBmanagerObj.delete_collector(collector_id_obj) - \ No newline at end of file diff --git a/src/telemetry/database/tests/temp_DB.py b/src/telemetry/database/tests/temp_DB.py deleted file mode 100644 index 089d3542492c2da87b839416f7118749bb82caad..0000000000000000000000000000000000000000 --- a/src/telemetry/database/tests/temp_DB.py +++ /dev/null @@ -1,327 +0,0 @@ -from sqlalchemy import create_engine, Column, String, Integer, Text, Float, ForeignKey -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker, relationship -from sqlalchemy.dialects.postgresql import UUID -import logging - -LOGGER = logging.getLogger(__name__) -Base = declarative_base() - -class Kpi(Base): - __tablename__ = 'kpi' - - kpi_id = Column(UUID(as_uuid=False), primary_key=True) - kpi_description = Column(Text) - kpi_sample_type = Column(Integer) - device_id = Column(String) - endpoint_id = Column(String) - service_id = Column(String) - slice_id = Column(String) - connection_id = Column(String) - link_id = Column(String) - - collectors = relationship('Collector', back_populates='kpi') - - def __repr__(self): - return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', " - f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', " - f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', " - f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', " - f"link_id='{self.link_id}')>") - -class Collector(Base): - __tablename__ = 'collector' - - collector_id = Column(UUID(as_uuid=False), primary_key=True) - kpi_id = Column(UUID(as_uuid=False), ForeignKey('kpi.kpi_id')) - collector = Column(String) - sampling_duration_s = Column(Float) - sampling_interval_s = Column(Float) - start_timestamp 
= Column(Float) - end_timestamp = Column(Float) - - kpi = relationship('Kpi', back_populates='collectors') - - def __repr__(self): - return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', " - f"collector='{self.collector}', sampling_duration_s='{self.sampling_duration_s}', " - f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', " - f"end_timestamp='{self.end_timestamp}')>") - -class DatabaseManager: - def __init__(self, db_url, db_name): - self.engine = create_engine(db_url) - self.db_name = db_name - self.Session = sessionmaker(bind=self.engine) - LOGGER.info("DatabaseManager initialized with DB URL: %s and DB Name: %s", db_url, db_name) - - def create_database(self): - try: - with self.engine.connect() as connection: - connection.execute(f"CREATE DATABASE {self.db_name};") - LOGGER.info("Database '%s' created successfully.", self.db_name) - except Exception as e: - LOGGER.error("Error creating database '%s': %s", self.db_name, e) - finally: - LOGGER.info("create_database method execution finished.") - - def create_tables(self): - try: - Base.metadata.create_all(self.engine) - LOGGER.info("Tables created successfully.") - except Exception as e: - LOGGER.error("Error creating tables: %s", e) - finally: - LOGGER.info("create_tables method execution finished.") - - def verify_table_creation(self): - try: - with self.engine.connect() as connection: - result = connection.execute("SHOW TABLES;") - tables = result.fetchall() - LOGGER.info("Tables verified: %s", tables) - return tables - except Exception as e: - LOGGER.error("Error verifying table creation: %s", e) - return [] - finally: - LOGGER.info("verify_table_creation method execution finished.") - - def insert_row_kpi(self, kpi_data): - session = self.Session() - try: - new_kpi = Kpi(**kpi_data) - session.add(new_kpi) - session.commit() - LOGGER.info("Inserted row into KPI table: %s", kpi_data) - except Exception as e: - session.rollback() - LOGGER.error("Error inserting row into KPI table: %s", e) - finally: - session.close() - LOGGER.info("insert_row_kpi method execution finished.") - - def insert_row_collector(self, collector_data): - session = self.Session() - try: - new_collector = Collector(**collector_data) - session.add(new_collector) - session.commit() - LOGGER.info("Inserted row into Collector table: %s", collector_data) - except Exception as e: - session.rollback() - LOGGER.error("Error inserting row into Collector table: %s", e) - finally: - session.close() - LOGGER.info("insert_row_collector method execution finished.") - - def verify_insertion_kpi(self, kpi_id): - session = self.Session() - try: - kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first() - LOGGER.info("Verified insertion in KPI table for kpi_id: %s, Result: %s", kpi_id, kpi) - return kpi - except Exception as e: - LOGGER.error("Error verifying insertion in KPI table for kpi_id %s: %s", kpi_id, e) - return None - finally: - session.close() - LOGGER.info("verify_insertion_kpi method execution finished.") - - def verify_insertion_collector(self, collector_id): - session = self.Session() - try: - collector = session.query(Collector).filter_by(collector_id=collector_id).first() - LOGGER.info("Verified insertion in Collector table for collector_id: %s, Result: %s", collector_id, collector) - return collector - except Exception as e: - LOGGER.error("Error verifying insertion in Collector table for collector_id %s: %s", collector_id, e) - return None - finally: - session.close() - 
LOGGER.info("verify_insertion_collector method execution finished.") - - def get_all_kpi_rows(self): - session = self.Session() - try: - kpi_rows = session.query(Kpi).all() - LOGGER.info("Fetched all rows from KPI table: %s", kpi_rows) - return kpi_rows - except Exception as e: - LOGGER.error("Error fetching all rows from KPI table: %s", e) - return [] - finally: - session.close() - LOGGER.info("get_all_kpi_rows method execution finished.") - - def get_all_collector_rows(self): - session = self.Session() - try: - collector_rows = session.query(Collector).all() - LOGGER.info("Fetched all rows from Collector table: %s", collector_rows) - return collector_rows - except Exception as e: - LOGGER.error("Error fetching all rows from Collector table: %s", e) - return [] - finally: - session.close() - LOGGER.info("get_all_collector_rows method execution finished.") - - def get_filtered_kpi_rows(self, **filters): - session = self.Session() - try: - query = session.query(Kpi) - for column, value in filters.items(): - query = query.filter(getattr(Kpi, column) == value) - result = query.all() - LOGGER.info("Fetched filtered rows from KPI table with filters ---------- : {:s}".format(str(result))) - return result - except NoResultFound: - LOGGER.warning("No results found in KPI table with filters %s", filters) - return [] - except Exception as e: - LOGGER.error("Error fetching filtered rows from KPI table with filters %s: %s", filters, e) - return [] - finally: - session.close() - LOGGER.info("get_filtered_kpi_rows method execution finished.") - - def get_filtered_collector_rows(self, **filters): - session = self.Session() - try: - query = session.query(Collector) - for column, value in filters.items(): - query = query.filter(getattr(Collector, column) == value) - result = query.all() - LOGGER.info("Fetched filtered rows from Collector table with filters %s: %s", filters, result) - return result - except NoResultFound: - LOGGER.warning("No results found in Collector table with filters %s", filters) - return [] - except Exception as e: - LOGGER.error("Error fetching filtered rows from Collector table with filters %s: %s", filters, e) - return [] - finally: - session.close() - LOGGER.info("get_filtered_collector_rows method execution finished.") - - def delete_kpi_by_id(self, kpi_id): - session = self.Session() - try: - kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first() - if kpi: - session.delete(kpi) - session.commit() - LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id) - else: - LOGGER.warning("KPI with kpi_id %s not found", kpi_id) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id, e) - finally: - session.close() - LOGGER.info("delete_kpi_by_id method execution finished.") - - def delete_collector_by_id(self, collector_id): - session = self.Session() - try: - collector = session.query(Collector).filter_by(collector_id=collector_id).first() - if collector: - session.delete(collector) - session.commit() - LOGGER.info("Deleted Collector with collector_id: %s", collector_id) - else: - LOGGER.warning("Collector with collector_id %s not found", collector_id) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error deleting Collector with collector_id %s: %s", collector_id, e) - finally: - session.close() - LOGGER.info("delete_collector_by_id method execution finished.") - - -# Example Usage -def main(): - CRDB_SQL_PORT = "26257" - CRDB_DATABASE = "telemetryfrontend" - CRDB_USERNAME = "tfs" - CRDB_PASSWORD = "tfs123" - 
CRDB_SSLMODE = "require" - CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}' - crdb_uri = CRDB_URI_TEMPLATE.format( - CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) - # db_url = "cockroachdb://username:password@localhost:26257/" - # db_name = "yourdatabase" - db_manager = DatabaseManager(crdb_uri, CRDB_DATABASE) - - # Create database - db_manager.create_database() - - # Update db_url to include the new database name - db_manager.engine = create_engine(f"{crdb_uri}") - db_manager.Session = sessionmaker(bind=db_manager.engine) - - # Create tables - db_manager.create_tables() - - # Verify table creation - tables = db_manager.verify_table_creation() - LOGGER.info('Tables in the database: {:s}'.format(str(tables))) - - # Insert a row into the KPI table - kpi_data = { - 'kpi_id': '123e4567-e89b-12d3-a456-426614174100', - 'kpi_description': 'Sample KPI', - 'kpi_sample_type': 1, - 'device_id': 'device_1', - 'endpoint_id': 'endpoint_1', - 'service_id': 'service_1', - 'slice_id': 'slice_1', - 'connection_id': 'conn_1', - 'link_id': 'link_1' - } - db_manager.insert_row_kpi(kpi_data) - - # Insert a row into the Collector table - collector_data = { - 'collector_id': '123e4567-e89b-12d3-a456-426614174101', - 'kpi_id': '123e4567-e89b-12d3-a456-426614174000', - 'collector': 'Collector 1', - 'sampling_duration_s': 60.0, - 'sampling_interval_s': 10.0, - 'start_timestamp': 1625247600.0, - 'end_timestamp': 1625247660.0 - } - db_manager.insert_row_collector(collector_data) - - # Verify insertion into KPI table - kpi = db_manager.verify_insertion_kpi('123e4567-e89b-12d3-a456-426614174000') - print("Inserted KPI:", kpi) - - # Verify insertion into Collector table - collector = db_manager.verify_insertion_collector('123e4567-e89b-12d3-a456-426614174001') - print("Inserted Collector:", collector) - - # Get all rows from KPI table - all_kpi_rows = db_manager.get_all_kpi_rows() - LOGGER.info("All KPI Rows: %s", all_kpi_rows) - - # Get all rows from Collector table - all_collector_rows = db_manager.get_all_collector_rows() - LOGGER.info("All Collector Rows: %s", all_collector_rows) - - # Get filtered rows from KPI table - filtered_kpi_rows = db_manager.get_filtered_kpi_rows(kpi_description='Sample KPI') - LOGGER.info("Filtered KPI Rows: %s", filtered_kpi_rows) - - # Get filtered rows from Collector table - filtered_collector_rows = db_manager.get_filtered_collector_rows(collector='Collector 1') - LOGGER.info("Filtered Collector Rows: %s", filtered_collector_rows) - - # Delete a KPI by kpi_id - kpi_id_to_delete = '123e4567-e89b-12d3-a456-426614174000' - db_manager.delete_kpi_by_id(kpi_id_to_delete) - - # Delete a Collector by collector_id - collector_id_to_delete = '123e4567-e89b-12d3-a456-426614174001' - db_manager.delete_collector_by_id(collector_id_to_delete) diff --git a/src/telemetry/frontend/Dockerfile b/src/telemetry/frontend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7125d31fe74f7c44a52c2783369c2dc7a4a31160 --- /dev/null +++ b/src/telemetry/frontend/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/telemetry/frontend +WORKDIR /var/teraflow/telemetry/frontend +COPY src/telemetry/frontend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/telemetry/__init__.py telemetry/__init__.py +COPY src/telemetry/frontend/. telemetry/frontend/ +COPY src/telemetry/database/. telemetry/database/ + +# Start the service +ENTRYPOINT ["python", "-m", "telemetry.frontend.service"] diff --git a/src/telemetry/frontend/requirements.in b/src/telemetry/frontend/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..231dc04e820387c95ffea72cbe67b9f0a9a0865a --- /dev/null +++ b/src/telemetry/frontend/requirements.in @@ -0,0 +1,19 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +confluent-kafka==2.3.* +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* diff --git a/src/telemetry/frontend/service/TelemetryFrontendService.py b/src/telemetry/frontend/service/TelemetryFrontendService.py index dc3f8df363a882db0f0ba3112a38f3bba3921c30..abd361aa0082e2de1d1f5fa7e81a336f3091af9a 100644 --- a/src/telemetry/frontend/service/TelemetryFrontendService.py +++ b/src/telemetry/frontend/service/TelemetryFrontendService.py @@ -14,17 +14,16 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc -from monitoring.service.NameMapping import NameMapping from common.tools.service.GenericGrpcService import GenericGrpcService from common.proto.telemetry_frontend_pb2_grpc import add_TelemetryFrontendServiceServicer_to_server from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl class TelemetryFrontendService(GenericGrpcService): - def __init__(self, name_mapping : NameMapping, cls_name: str = __name__) -> None: + def __init__(self, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND) super().__init__(port, cls_name=cls_name) - self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl(name_mapping) + self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl() def install_servicers(self): add_TelemetryFrontendServiceServicer_to_server(self.telemetry_frontend_servicer, self.server) diff --git a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py index e6830ad676d3934c88b01575ebdd1d0549fb00d1..b73d9fa952ee42aeb7adb8f3c0b2e4a3ba7f3e09 100644 --- a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py +++ b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py @@ -12,126 +12,167 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import ast +import json import threading -import time -from typing import Tuple, Any +from typing import Any, Dict import grpc import logging -from confluent_kafka import Consumer as KafkaConsumer +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic from common.proto.context_pb2 import Empty -from monitoring.service.NameMapping import NameMapping -from confluent_kafka import Producer as KafkaProducer -from confluent_kafka import KafkaException -from confluent_kafka import KafkaError from common.proto.telemetry_frontend_pb2 import CollectorId, Collector, CollectorFilter, CollectorList -from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceServicer from telemetry.database.TelemetryModel import Collector as CollectorModel -from telemetry.database.managementDB import managementDB +from telemetry.database.Telemetry_DB import TelemetryDB + +from confluent_kafka import Consumer as KafkaConsumer +from confluent_kafka import Producer as KafkaProducer +from confluent_kafka import KafkaError + LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Monitoring', 'TelemetryFrontend') -KAFKA_SERVER_IP = '127.0.0.1:9092' -ACTIVE_COLLECTORS = [] -KAFKA_TOPICS = {'request' : 'topic_request', - 'response': 'topic_response'} +METRICS_POOL = MetricsPool('TelemetryFrontend', 'NBIgRPC') +ACTIVE_COLLECTORS = [] # keep and can be populated from DB class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer): - def __init__(self, name_mapping : NameMapping): + def __init__(self): LOGGER.info('Init TelemetryFrontendService') - self.managementDBobj = managementDB() - self.kafka_producer = KafkaProducer({'bootstrap.servers': KAFKA_SERVER_IP,}) - self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KAFKA_SERVER_IP, - 'group.id' : 'frontend', - 'auto.offset.reset' : 'latest'}) - - def add_collector_to_db(self, request: Collector ): # type: ignore - try: - # Create a new Collector instance - collector_to_insert = CollectorModel() - collector_to_insert.collector_id = request.collector_id.collector_id.uuid - collector_to_insert.kpi_id = request.kpi_id.kpi_id.uuid - # collector_to_insert.collector_decription= request.collector - collector_to_insert.sampling_duration_s = request.duration_s - collector_to_insert.sampling_interval_s = request.interval_s - collector_to_insert.start_timestamp = time.time() - collector_to_insert.end_timestamp = time.time() - managementDB.add_row_to_db(collector_to_insert) - except Exception as e: - LOGGER.info("Unable to create collectorModel class object. 
{:}".format(e)) + self.tele_db_obj = TelemetryDB() + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'frontend', + 'auto.offset.reset' : 'latest'}) - # @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def StartCollector(self, request : Collector, grpc_context: grpc.ServicerContext # type: ignore ) -> CollectorId: # type: ignore - # push info to frontend db LOGGER.info ("gRPC message: {:}".format(request)) response = CollectorId() - _collector_id = str(request.collector_id.collector_id.uuid) - _collector_kpi_id = str(request.kpi_id.kpi_id.uuid) - _collector_duration = int(request.duration_s) - _collector_interval = int(request.interval_s) - # pushing Collector to DB - self.add_collector_to_db(request) - self.publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval) - # self.run_publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval) - response.collector_id.uuid = request.collector_id.collector_id.uuid # type: ignore + + # TODO: Verify the presence of Kpi ID in KpiDB or assume that KPI ID already exists? + self.tele_db_obj.add_row_to_db( + CollectorModel.ConvertCollectorToRow(request) + ) + self.PublishStartRequestOnKafka(request) + + response.collector_id.uuid = request.collector_id.collector_id.uuid return response - - def run_publish_to_kafka_request_topic(self, msg_key: str, kpi: str, duration : int, interval: int): - # Add threading.Thread() response to dictonary and call start() in the next statement - threading.Thread(target=self.publish_to_kafka_request_topic, args=(msg_key, kpi, duration, interval)).start() - - def publish_to_kafka_request_topic(self, - collector_id: str, kpi: str, duration : int, interval: int - ): + + def PublishStartRequestOnKafka(self, collector_obj): + """ + Method to generate collector request on Kafka. + """ + collector_uuid = collector_obj.collector_id.collector_id.uuid + collector_to_generate : Dict = { + "kpi_id" : collector_obj.kpi_id.kpi_id.uuid, + "duration": collector_obj.duration_s, + "interval": collector_obj.interval_s + } + self.kafka_producer.produce( + KafkaTopic.REQUEST.value, + key = collector_uuid, + value = json.dumps(collector_to_generate), + callback = self.delivery_callback + ) + LOGGER.info("Collector Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_generate)) + ACTIVE_COLLECTORS.append(collector_uuid) + self.kafka_producer.flush() + + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def StopCollector(self, + request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore + ) -> Empty: # type: ignore + LOGGER.info ("gRPC message: {:}".format(request)) + try: + collector_to_delete = request.collector_id.uuid + self.tele_db_obj.delete_db_row_by_id( + CollectorModel, "collector_id", collector_to_delete + ) + self.PublishStopRequestOnKafka(request) + except Exception as e: + LOGGER.error('Unable to delete collector. Error: {:}'.format(e)) + return Empty() + + def PublishStopRequestOnKafka(self, collector_id): """ - Method to generate collector request to Kafka topic. + Method to generate stop collector request on Kafka. 
""" - # time.sleep(5) - # producer_configs = { - # 'bootstrap.servers': KAFKA_SERVER_IP, - # } - # topic_request = "topic_request" - msg_value : Tuple [str, int, int] = (kpi, duration, interval) - # print ("Request generated: ", "Colletcor Id: ", collector_id, \ - # ", \nKPI: ", kpi, ", Duration: ", duration, ", Interval: ", interval) - # producerObj = KafkaProducer(producer_configs) - self.kafka_producer.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback) - # producerObj.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback) - LOGGER.info("Collector Request Generated: {:}, {:}, {:}, {:}".format(collector_id, kpi, duration, interval)) - # producerObj.produce(topic_request, key=collector_id, value= str(msg_value), callback=self.delivery_callback) - ACTIVE_COLLECTORS.append(collector_id) + collector_uuid = collector_id.collector_id.uuid + collector_to_stop : Dict = { + "kpi_id" : collector_uuid, + "duration": -1, + "interval": -1 + } + self.kafka_producer.produce( + KafkaTopic.REQUEST.value, + key = collector_uuid, + value = json.dumps(collector_to_stop), + callback = self.delivery_callback + ) + LOGGER.info("Collector Stop Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_stop)) + try: + ACTIVE_COLLECTORS.remove(collector_uuid) + except ValueError: + LOGGER.warning('Collector ID {:} not found in active collector list'.format(collector_uuid)) self.kafka_producer.flush() - def run_kafka_listener(self): - # print ("--- STARTED: run_kafka_listener ---") - threading.Thread(target=self.kafka_listener).start() + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def SelectCollectors(self, + request : CollectorFilter, contextgrpc_context: grpc.ServicerContext # type: ignore + ) -> CollectorList: # type: ignore + LOGGER.info("gRPC message: {:}".format(request)) + response = CollectorList() + + try: + rows = self.tele_db_obj.select_with_filter(CollectorModel, request) + except Exception as e: + LOGGER.info('Unable to apply filter on kpi descriptor. {:}'.format(e)) + try: + for row in rows: + collector_obj = CollectorModel.ConvertRowToCollector(row) + response.collector_list.append(collector_obj) + return response + except Exception as e: + LOGGER.info('Unable to process filter response {:}'.format(e)) + + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def delivery_callback(self, err, msg): + """ + Callback function to handle message delivery status. + Args: + err (KafkaError): Kafka error object. + msg (Message): Kafka message object. + """ + if err: + LOGGER.debug('Message delivery failed: {:}'.format(err)) + print('Message delivery failed: {:}'.format(err)) + # else: + # LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) + # print('Message delivered to topic {:}'.format(msg.topic())) + + # ---------- Independent Method --------------- + # Listener method is independent of any method (same lifetime as service) + # continously listens for responses + def RunResponseListener(self): + threading.Thread(target=self.ResponseListener).start() return True - def kafka_listener(self): + def ResponseListener(self): """ listener for response on Kafka topic. 
""" - # # print ("--- STARTED: kafka_listener ---") - # conusmer_configs = { - # 'bootstrap.servers' : KAFKA_SERVER_IP, - # 'group.id' : 'frontend', - # 'auto.offset.reset' : 'latest' - # } - # # topic_response = "topic_response" - - # consumerObj = KafkaConsumer(conusmer_configs) - self.kafka_consumer.subscribe([KAFKA_TOPICS['response']]) - # print (time.time()) + self.kafka_consumer.subscribe([KafkaTopic.RESPONSE.value]) while True: receive_msg = self.kafka_consumer.poll(2.0) if receive_msg is None: - # print (" - Telemetry frontend listening on Kafka Topic: ", KAFKA_TOPICS['response']) # added for debugging purposes continue elif receive_msg.error(): if receive_msg.error().code() == KafkaError._PARTITION_EOF: @@ -142,63 +183,16 @@ class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer): try: collector_id = receive_msg.key().decode('utf-8') if collector_id in ACTIVE_COLLECTORS: - (kpi_id, kpi_value) = ast.literal_eval(receive_msg.value().decode('utf-8')) - self.process_response(collector_id, kpi_id, kpi_value) + kpi_value = json.loads(receive_msg.value().decode('utf-8')) + self.process_response(collector_id, kpi_value['kpi_id'], kpi_value['kpi_value']) else: print(f"collector id does not match.\nRespone ID: '{collector_id}' --- Active IDs: '{ACTIVE_COLLECTORS}' ") except Exception as e: - print(f"No message key found: {str(e)}") + print(f"Error extarcting msg key or value: {str(e)}") continue - # return None def process_response(self, collector_id: str, kpi_id: str, kpi_value: Any): if kpi_id == "-1" and kpi_value == -1: - # LOGGER.info("Sucessfully terminated Collector: {:}".format(collector_id)) - print ("Sucessfully terminated Collector: ", collector_id) + print ("Backend termination confirmation for collector id: ", collector_id) else: - print ("Frontend-Received values Collector Id:", collector_id, "-KPI:", kpi_id, "-VALUE:", kpi_value) - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def delivery_callback(self, err, msg): - """ - Callback function to handle message delivery status. - Args: - err (KafkaError): Kafka error object. - msg (Message): Kafka message object. - """ - if err: - print(f'Message delivery failed: {err}') - else: - print(f'Message delivered to topic {msg.topic()}') - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def StopCollector(self, - request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore - ) -> Empty: # type: ignore - LOGGER.info ("gRPC message: {:}".format(request)) - _collector_id = request.collector_id.uuid - self.publish_to_kafka_request_topic(_collector_id, "", -1, -1) - return Empty() - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def SelectCollectors(self, - request : CollectorFilter, contextgrpc_context: grpc.ServicerContext # type: ignore - ) -> CollectorList: # type: ignore - LOGGER.info("gRPC message: {:}".format(request)) - response = CollectorList() - filter_to_apply = dict() - filter_to_apply['kpi_id'] = request.kpi_id[0].kpi_id.uuid - # filter_to_apply['duration_s'] = request.duration_s[0] - try: - rows = self.managementDBobj.select_with_filter(CollectorModel, **filter_to_apply) - except Exception as e: - LOGGER.info('Unable to apply filter on kpi descriptor. 
{:}'.format(e)) - try: - if len(rows) != 0: - for row in rows: - collector_obj = Collector() - collector_obj.collector_id.collector_id.uuid = row.collector_id - response.collector_list.append(collector_obj) - return response - except Exception as e: - LOGGER.info('Unable to process response {:}'.format(e)) \ No newline at end of file + print ("KPI Value: Collector Id:", collector_id, ", Kpi Id:", kpi_id, ", Value:", kpi_value) diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py index 3b0263706c3dad3756306d1ba8a3a104d568cd6f..2a6c5dbcf2da6b6a074c2b8ee23791bc4896442f 100644 --- a/src/telemetry/frontend/service/__main__.py +++ b/src/telemetry/frontend/service/__main__.py @@ -12,16 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import signal -import sys -import logging, threading +import logging, signal, sys, threading from prometheus_client import start_http_server -from monitoring.service.NameMapping import NameMapping +from common.Settings import get_log_level, get_metrics_port from .TelemetryFrontendService import TelemetryFrontendService -from monitoring.service.EventTools import EventsDeviceCollector -from common.Settings import ( - get_log_level, wait_for_environment_variables, get_env_var_name, - get_metrics_port ) terminate = threading.Event() LOGGER = None @@ -31,20 +25,12 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name terminate.set() def main(): - global LOGGER + global LOGGER # pylint: disable=global-statement log_level = get_log_level() logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) -# ------- will be added later -------------- - # wait_for_environment_variables([ - # get_env_var_name - - - # ]) -# ------- will be added later -------------- - signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) @@ -54,9 +40,7 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) - name_mapping = NameMapping() - - grpc_service = TelemetryFrontendService(name_mapping) + grpc_service = TelemetryFrontendService() grpc_service.start() # Wait for Ctrl+C or termination signal @@ -69,4 +53,4 @@ def main(): return 0 if __name__ == '__main__': - sys.exit(main()) \ No newline at end of file + sys.exit(main()) diff --git a/src/telemetry/frontend/tests/Messages.py b/src/telemetry/frontend/tests/Messages.py index 1205898d13a610cd262979242e4f489f5e35cdb8..a0e93e8a121b9efaac83f7169419911c8ee6e3ea 100644 --- a/src/telemetry/frontend/tests/Messages.py +++ b/src/telemetry/frontend/tests/Messages.py @@ -16,68 +16,27 @@ import uuid import random from common.proto import telemetry_frontend_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType - +from common.proto.kpi_manager_pb2 import KpiId # ----------------------- "2nd" Iteration -------------------------------- def create_collector_id(): _collector_id = telemetry_frontend_pb2.CollectorId() - _collector_id.collector_id.uuid = uuid.uuid4() + # _collector_id.collector_id.uuid = str(uuid.uuid4()) + _collector_id.collector_id.uuid = "5d45f53f-d567-429f-9427-9196ac72ff0c" return _collector_id -# def create_collector_id_a(coll_id_str : str): -# _collector_id = telemetry_frontend_pb2.CollectorId() -# _collector_id.collector_id.uuid = str(coll_id_str) -# return _collector_id - def create_collector_request(): _create_collector_request = telemetry_frontend_pb2.Collector() 
_create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4()) - _create_collector_request.kpi_id.kpi_id.uuid = "165d20c5-a446-42fa-812f-e2b7ed283c6f" - # _create_collector_request.collector = "collector description" + _create_collector_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) _create_collector_request.duration_s = float(random.randint(8, 16)) _create_collector_request.interval_s = float(random.randint(2, 4)) return _create_collector_request def create_collector_filter(): _create_collector_filter = telemetry_frontend_pb2.CollectorFilter() - new_kpi_id = _create_collector_filter.kpi_id.add() - new_kpi_id.kpi_id.uuid = "165d20c5-a446-42fa-812f-e2b7ed283c6f" + kpi_id_obj = KpiId() + # kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) + kpi_id_obj.kpi_id.uuid = "a7237fa3-caf4-479d-84b6-4d9f9738fb7f" + _create_collector_filter.kpi_id.append(kpi_id_obj) return _create_collector_filter - -# ----------------------- "First" Iteration -------------------------------- -# def create_collector_request_a(): -# _create_collector_request_a = telemetry_frontend_pb2.Collector() -# _create_collector_request_a.collector_id.collector_id.uuid = "-1" -# return _create_collector_request_a - -# def create_collector_request_b(str_kpi_id, coll_duration_s, coll_interval_s -# ) -> telemetry_frontend_pb2.Collector: -# _create_collector_request_b = telemetry_frontend_pb2.Collector() -# _create_collector_request_b.collector_id.collector_id.uuid = '1' -# _create_collector_request_b.kpi_id.kpi_id.uuid = str_kpi_id -# _create_collector_request_b.duration_s = coll_duration_s -# _create_collector_request_b.interval_s = coll_interval_s -# return _create_collector_request_b - -# def create_collector_filter(): -# _create_collector_filter = telemetry_frontend_pb2.CollectorFilter() -# new_collector_id = _create_collector_filter.collector_id.add() -# new_collector_id.collector_id.uuid = "COLL1" -# new_kpi_id = _create_collector_filter.kpi_id.add() -# new_kpi_id.kpi_id.uuid = "KPI1" -# new_device_id = _create_collector_filter.device_id.add() -# new_device_id.device_uuid.uuid = 'DEV1' -# new_service_id = _create_collector_filter.service_id.add() -# new_service_id.service_uuid.uuid = 'SERV1' -# new_slice_id = _create_collector_filter.slice_id.add() -# new_slice_id.slice_uuid.uuid = 'SLC1' -# new_endpoint_id = _create_collector_filter.endpoint_id.add() -# new_endpoint_id.endpoint_uuid.uuid = 'END1' -# new_connection_id = _create_collector_filter.connection_id.add() -# new_connection_id.connection_uuid.uuid = 'CON1' -# _create_collector_filter.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED) -# return _create_collector_filter - -# def create_collector_list(): -# _create_collector_list = telemetry_frontend_pb2.CollectorList() -# return _create_collector_list \ No newline at end of file diff --git a/src/telemetry/frontend/tests/test_frontend.py b/src/telemetry/frontend/tests/test_frontend.py index 002cc430721845aa5aa18274375e2c22b5d77ff7..9c3f9d3a8f545792eb2bb3a371c6c20664d24f69 100644 --- a/src/telemetry/frontend/tests/test_frontend.py +++ b/src/telemetry/frontend/tests/test_frontend.py @@ -13,129 +13,40 @@ # limitations under the License. 
import os -import time import pytest import logging -from typing import Union -from common.proto.context_pb2 import Empty from common.Constants import ServiceNameEnum from common.proto.telemetry_frontend_pb2 import CollectorId, CollectorList -from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server -from context.client.ContextClient import ContextClient -from common.tools.service.GenericGrpcService import GenericGrpcService -from common.tests.MockServicerImpl_Context import MockServicerImpl_Context +from common.proto.context_pb2 import Empty +from common.tools.kafka.Variables import KafkaTopic from common.Settings import ( get_service_port_grpc, get_env_var_name, ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC) from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient from telemetry.frontend.service.TelemetryFrontendService import TelemetryFrontendService +from telemetry.frontend.tests.Messages import ( + create_collector_request, create_collector_id, create_collector_filter) from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl -from telemetry.frontend.tests.Messages import ( create_collector_request, create_collector_filter) -from telemetry.database.managementDB import managementDB -from telemetry.database.TelemetryEngine import TelemetryEngine - -from device.client.DeviceClient import DeviceClient -from device.service.DeviceService import DeviceService -from device.service.driver_api.DriverFactory import DriverFactory -from device.service.driver_api.DriverInstanceCache import DriverInstanceCache -from monitoring.service.NameMapping import NameMapping - -os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' -from device.service.drivers import DRIVERS ########################### # Tests Setup ########################### LOCAL_HOST = '127.0.0.1' -MOCKSERVICE_PORT = 10000 -TELEMETRY_FRONTEND_PORT = str(MOCKSERVICE_PORT) + str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)) +TELEMETRY_FRONTEND_PORT = str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)) os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(TELEMETRY_FRONTEND_PORT) LOGGER = logging.getLogger(__name__) -class MockContextService(GenericGrpcService): - # Mock Service implementing Context to simplify unitary tests of Monitoring - - def __init__(self, bind_port: Union[str, int]) -> None: - super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') - - # pylint: disable=attribute-defined-outside-init - def install_servicers(self): - self.context_servicer = MockServicerImpl_Context() - add_ContextServiceServicer_to_server(self.context_servicer, self.server) - @pytest.fixture(scope='session') -def context_service(): - LOGGER.info('Initializing MockContextService...') - _service = MockContextService(MOCKSERVICE_PORT) - _service.start() - - LOGGER.info('Yielding MockContextService...') - yield _service - - LOGGER.info('Terminating MockContextService...') - _service.context_servicer.msg_broker.terminate() - _service.stop() - - LOGGER.info('Terminated MockContextService...') - -@pytest.fixture(scope='session') -def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument - LOGGER.info('Initializing ContextClient...') - _client = ContextClient() - - LOGGER.info('Yielding 
ContextClient...') - yield _client - - LOGGER.info('Closing ContextClient...') - _client.close() - - LOGGER.info('Closed ContextClient...') - -@pytest.fixture(scope='session') -def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument - LOGGER.info('Initializing DeviceService...') - driver_factory = DriverFactory(DRIVERS) - driver_instance_cache = DriverInstanceCache(driver_factory) - _service = DeviceService(driver_instance_cache) - _service.start() - - # yield the server, when test finishes, execution will resume to stop it - LOGGER.info('Yielding DeviceService...') - yield _service - - LOGGER.info('Terminating DeviceService...') - _service.stop() - - LOGGER.info('Terminated DeviceService...') - -@pytest.fixture(scope='session') -def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument - LOGGER.info('Initializing DeviceClient...') - _client = DeviceClient() - - LOGGER.info('Yielding DeviceClient...') - yield _client - - LOGGER.info('Closing DeviceClient...') - _client.close() - - LOGGER.info('Closed DeviceClient...') - -@pytest.fixture(scope='session') -def telemetryFrontend_service( - context_service : MockContextService, - device_service : DeviceService - ): +def telemetryFrontend_service(): LOGGER.info('Initializing TelemetryFrontendService...') - name_mapping = NameMapping() - _service = TelemetryFrontendService(name_mapping) + _service = TelemetryFrontendService() _service.start() # yield the server, when test finishes, execution will resume to stop it @@ -168,37 +79,73 @@ def telemetryFrontend_client( # Tests Implementation of Telemetry Frontend ########################### -def test_verify_db_and_table(): - LOGGER.info(' >>> test_verify_database_and_tables START: <<< ') - _engine = TelemetryEngine.get_engine() - managementDB.create_database(_engine) - managementDB.create_tables(_engine) +# ------- Re-structuring Test --------- +# --- "test_validate_kafka_topics" should be run before the functionality tests --- +def test_validate_kafka_topics(): + LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") + response = KafkaTopic.create_all_topics() + assert isinstance(response, bool) +# ----- core funtionality test ----- def test_StartCollector(telemetryFrontend_client): LOGGER.info(' >>> test_StartCollector START: <<< ') response = telemetryFrontend_client.StartCollector(create_collector_request()) LOGGER.debug(str(response)) assert isinstance(response, CollectorId) -def test_run_kafka_listener(): - LOGGER.info(' >>> test_run_kafka_listener START: <<< ') - name_mapping = NameMapping() - TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping) - response = TelemetryFrontendServiceObj.run_kafka_listener() # Method "run_kafka_listener" is not define in frontend.proto - LOGGER.debug(str(response)) - assert isinstance(response, bool) - def test_StopCollector(telemetryFrontend_client): LOGGER.info(' >>> test_StopCollector START: <<< ') - _collector_id = telemetryFrontend_client.StartCollector(create_collector_request()) - time.sleep(3) # wait for small amount before call the stopCollecter() - response = telemetryFrontend_client.StopCollector(_collector_id) + response = telemetryFrontend_client.StopCollector(create_collector_id()) LOGGER.debug(str(response)) assert isinstance(response, Empty) -def test_select_collectors(telemetryFrontend_client): - LOGGER.info(' >>> test_select_collector requesting <<< ') +def test_SelectCollectors(telemetryFrontend_client): + 
LOGGER.info(' >>> test_SelectCollectors START: <<< ') response = telemetryFrontend_client.SelectCollectors(create_collector_filter()) - LOGGER.info('Received Rows after applying Filter: {:} '.format(response)) LOGGER.debug(str(response)) - assert isinstance(response, CollectorList) \ No newline at end of file + assert isinstance(response, CollectorList) + +# ----- Non-gRPC method tests ----- +def test_RunResponseListener(): + LOGGER.info(' >>> test_RunResponseListener START: <<< ') + TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl() + response = TelemetryFrontendServiceObj.RunResponseListener() # becasue Method "run_kafka_listener" is not define in frontend.proto + LOGGER.debug(str(response)) + assert isinstance(response, bool) + +# ------- previous test ---------------- + +# def test_verify_db_and_table(): +# LOGGER.info(' >>> test_verify_database_and_tables START: <<< ') +# _engine = TelemetryEngine.get_engine() +# managementDB.create_database(_engine) +# managementDB.create_tables(_engine) + +# def test_StartCollector(telemetryFrontend_client): +# LOGGER.info(' >>> test_StartCollector START: <<< ') +# response = telemetryFrontend_client.StartCollector(create_collector_request()) +# LOGGER.debug(str(response)) +# assert isinstance(response, CollectorId) + +# def test_run_kafka_listener(): +# LOGGER.info(' >>> test_run_kafka_listener START: <<< ') +# name_mapping = NameMapping() +# TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping) +# response = TelemetryFrontendServiceObj.run_kafka_listener() # Method "run_kafka_listener" is not define in frontend.proto +# LOGGER.debug(str(response)) +# assert isinstance(response, bool) + +# def test_StopCollector(telemetryFrontend_client): +# LOGGER.info(' >>> test_StopCollector START: <<< ') +# _collector_id = telemetryFrontend_client.StartCollector(create_collector_request()) +# time.sleep(3) # wait for small amount before call the stopCollecter() +# response = telemetryFrontend_client.StopCollector(_collector_id) +# LOGGER.debug(str(response)) +# assert isinstance(response, Empty) + +# def test_select_collectors(telemetryFrontend_client): +# LOGGER.info(' >>> test_select_collector requesting <<< ') +# response = telemetryFrontend_client.SelectCollectors(create_collector_filter()) +# LOGGER.info('Received Rows after applying Filter: {:} '.format(response)) +# LOGGER.debug(str(response)) +# assert isinstance(response, CollectorList) \ No newline at end of file diff --git a/src/telemetry/telemetry_virenv.txt b/src/telemetry/telemetry_virenv.txt deleted file mode 100644 index e39f80b6593d6c41411751cdd0ea59ee05344570..0000000000000000000000000000000000000000 --- a/src/telemetry/telemetry_virenv.txt +++ /dev/null @@ -1,49 +0,0 @@ -anytree==2.8.0 -APScheduler==3.10.1 -attrs==23.2.0 -certifi==2024.2.2 -charset-normalizer==2.0.12 -colorama==0.4.6 -confluent-kafka==2.3.0 -coverage==6.3 -future-fstrings==1.2.0 -greenlet==3.0.3 -grpcio==1.47.5 -grpcio-health-checking==1.47.5 -grpcio-tools==1.47.5 -grpclib==0.4.4 -h2==4.1.0 -hpack==4.0.0 -hyperframe==6.0.1 -idna==3.7 -influx-line-protocol==0.1.4 -iniconfig==2.0.0 -kafka-python==2.0.2 -multidict==6.0.5 -networkx==3.3 -packaging==24.0 -pluggy==1.5.0 -prettytable==3.5.0 -prometheus-client==0.13.0 -protobuf==3.20.3 -psycopg2-binary==2.9.3 -py==1.11.0 -py-cpuinfo==9.0.0 -pytest==6.2.5 -pytest-benchmark==3.4.1 -pytest-depends==1.0.1 -python-dateutil==2.8.2 -python-json-logger==2.0.2 -pytz==2024.1 -questdb==1.0.1 -requests==2.27.1 -six==1.16.0 -SQLAlchemy==1.4.52 
-sqlalchemy-cockroachdb==1.4.4 -SQLAlchemy-Utils==0.38.3 -toml==0.10.2 -typing_extensions==4.12.0 -tzlocal==5.2 -urllib3==1.26.18 -wcwidth==0.2.13 -xmltodict==0.12.0 diff --git a/src/telemetry/database/tests/messages.py b/src/telemetry/tests/messages.py similarity index 100% rename from src/telemetry/database/tests/messages.py rename to src/telemetry/tests/messages.py diff --git a/src/telemetry/tests/test_telemetryDB.py b/src/telemetry/tests/test_telemetryDB.py new file mode 100644 index 0000000000000000000000000000000000000000..c4976f8c2144fcdcad43a3e25d43091010de0d18 --- /dev/null +++ b/src/telemetry/tests/test_telemetryDB.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from telemetry.database.Telemetry_DB import TelemetryDB + +LOGGER = logging.getLogger(__name__) + +def test_verify_databases_and_tables(): + LOGGER.info('>>> test_verify_databases_and_tables : START <<< ') + TelemetryDBobj = TelemetryDB() + TelemetryDBobj.drop_database() + TelemetryDBobj.verify_tables() + TelemetryDBobj.create_database() + TelemetryDBobj.create_tables() + TelemetryDBobj.verify_tables() \ No newline at end of file diff --git a/src/tests/tools/mock_qkd_nodes/YangValidator.py b/src/tests/tools/mock_qkd_nodes/YangValidator.py new file mode 100644 index 0000000000000000000000000000000000000000..2056d5df64a1d841fc74c1be73aa6408051ab738 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/YangValidator.py @@ -0,0 +1,42 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
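The database smoke test above (test_verify_databases_and_tables) can also be triggered programmatically; a minimal sketch, assuming the new file location under src/telemetry/tests/.

```python
# Illustrative sketch only: running the telemetry DB smoke test from Python.
import pytest
pytest.main(['-v', 'src/telemetry/tests/test_telemetryDB.py'])
```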
+ +import libyang, os +from typing import Dict, List, Optional + +YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang') + +class YangValidator: + def __init__(self, main_module : str, dependency_modules : List[str]) -> None: + self._yang_context = libyang.Context(YANG_DIR) + + self._yang_module = self._yang_context.load_module(main_module) + mods = [self._yang_context.load_module(mod) for mod in dependency_modules] + [self._yang_module] + + for mod in mods: + mod.feature_enable_all() + + + + def parse_to_dict(self, message : Dict) -> Dict: + dnode : Optional[libyang.DNode] = self._yang_module.parse_data_dict( + message, validate_present=True, validate=True, strict=True + ) + if dnode is None: raise Exception('Unable to parse Message({:s})'.format(str(message))) + message = dnode.print_dict() + dnode.free() + return message + + def destroy(self) -> None: + self._yang_context.destroy() diff --git a/src/tests/tools/mock_qkd_nodes/mock.py b/src/tests/tools/mock_qkd_nodes/mock.py new file mode 100644 index 0000000000000000000000000000000000000000..7a606f6cac855fee9852f620c595908fbb3d36da --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/mock.py @@ -0,0 +1,368 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
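+ +# Descriptive note on the file below: this Flask application mocks a minimal RESTCONF-style HTTP API +# for three emulated SD-QKD nodes, keyed by '<ip>:<port>' in the 'nodes' dictionary. GET requests +# return the requested subtree of a node and validate it against the etsi-qkd-sdn-node model through +# YangValidator; POST (create) and PUT/PATCH (merge) apply the request JSON onto the in-memory node +# state via edit_side_effect().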
+ +import os +from flask import Flask, request +from YangValidator import YangValidator + +app = Flask(__name__) + + +yang_validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types']) + + +nodes = { + '127.0.0.1:11111': {'node': { + 'qkdn_id': '00000001-0000-0000-0000-000000000000', + }, + 'qkdn_capabilities': { + }, + 'qkd_applications': { + 'qkd_app': [ + { + 'app_id': '00000001-0001-0000-0000-000000000000', + 'client_app_id': [], + 'app_statistics': { + 'statistics': [] + }, + 'app_qos': { + }, + 'backing_qkdl_id': [] + } + ] + }, + 'qkd_interfaces': { + 'qkd_interface': [ + { + 'qkdi_id': '100', + 'qkdi_att_point': { + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '101', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'1001' + }, + 'qkdi_capabilities': { + } + } + ] + }, + 'qkd_links': { + 'qkd_link': [ + + ] + } + }, + + '127.0.0.1:22222': {'node': { + 'qkdn_id': '00000002-0000-0000-0000-000000000000', + }, + 'qkdn_capabilities': { + }, + 'qkd_applications': { + 'qkd_app': [ + { + 'app_id': '00000002-0001-0000-0000-000000000000', + 'client_app_id': [], + 'app_statistics': { + 'statistics': [] + }, + 'app_qos': { + }, + 'backing_qkdl_id': [] + } + ] + }, + 'qkd_interfaces': { + 'qkd_interface': [ + { + 'qkdi_id': '200', + 'qkdi_att_point': { + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '201', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'2001' + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '202', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'2002' + }, + 'qkdi_capabilities': { + } + } + ] + }, + 'qkd_links': { + 'qkd_link': [ + + ] + } + }, + + '127.0.0.1:33333': {'node': { + 'qkdn_id': '00000003-0000-0000-0000-000000000000', + }, + 'qkdn_capabilities': { + }, + 'qkd_applications': { + 'qkd_app': [ + { + 'app_id': '00000003-0001-0000-0000-000000000000', + 'client_app_id': [], + 'app_statistics': { + 'statistics': [] + }, + 'app_qos': { + }, + 'backing_qkdl_id': [] + } + ] + }, + 'qkd_interfaces': { + 'qkd_interface': [ + { + 'qkdi_id': '300', + 'qkdi_att_point': { + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '301', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'3001' + }, + 'qkdi_capabilities': { + } + } + ] + }, + 'qkd_links': { + 'qkd_link': [ + + ] + } + } +} + + +def get_side_effect(url): + + steps = url.lstrip('https://').lstrip('http://').rstrip('/') + ip_port, _, _, header, *steps = steps.split('/') + + header_splitted = header.split(':') + + module = header_splitted[0] + assert(module == 'etsi-qkd-sdn-node') + + tree = {'qkd_node': nodes[ip_port]['node'].copy()} + + if len(header_splitted) == 1 or not header_splitted[1]: + value = nodes[ip_port].copy() + value.pop('node') + tree['qkd_node'].update(value) + + return tree, tree + + root = header_splitted[1] + assert(root == 'qkd_node') + + if not steps: + return tree, tree + + + endpoint, *steps = steps + + value = nodes[ip_port][endpoint] + + if not steps: + return_value = {endpoint:value} + tree['qkd_node'].update(return_value) + + return return_value, tree + + + + ''' + element, *steps = steps + + container, key = element.split('=') + + # value = value[container][key] + + if not steps: + return_value['qkd_node'][endpoint] = [value] + return return_value + + ''' + raise Exception('Url too long') + + + +def edit(from_dict, to_dict, create): + for key, value in from_dict.items(): + if isinstance(value, dict): + if key not in to_dict and create: + to_dict[key] = {} + edit(from_dict[key], to_dict[key], create) + elif isinstance(value, list): + 
to_dict[key].extend(value) + else: + to_dict[key] = value + + + +def edit_side_effect(url, json, create): + steps = url.lstrip('https://').lstrip('http://').rstrip('/') + ip_port, _, _, header, *steps = steps.split('/') + + module, root = header.split(':') + + assert(module == 'etsi-qkd-sdn-node') + assert(root == 'qkd_node') + + if not steps: + edit(json, nodes[ip_port]['node'], create) + return + + endpoint, *steps = steps + + if not steps: + edit(json[endpoint], nodes[ip_port][endpoint], create) + return + + + ''' + element, *steps = steps + + container, key = element.split('=') + + if not steps: + if key not in nodes[ip_port][endpoint][container] and create: + nodes[ip_port][endpoint][container][key] = {} + + edit(json, nodes[ip_port][endpoint][container][key], create) + return 0 + ''' + + raise Exception('Url too long') + + + + + + +@app.get('/', defaults={'path': ''}) +@app.get("/<string:path>") +@app.get('/<path:path>') +def get(path): + msg, msg_validate = get_side_effect(request.base_url) + print(msg_validate) + yang_validator.parse_to_dict(msg_validate) + return msg + + +@app.post('/', defaults={'path': ''}) +@app.post("/<string:path>") +@app.post('/<path:path>') +def post(path): + success = True + reason = '' + try: + edit_side_effect(request.base_url, request.json, True) + except Exception as e: + reason = str(e) + success = False + return {'success': success, 'reason': reason} + + + +@app.route('/', defaults={'path': ''}, methods=['PUT', 'PATCH']) +@app.route("/<string:path>", methods=['PUT', 'PATCH']) +@app.route('/<path:path>', methods=['PUT', 'PATCH']) +def patch(path): + success = True + reason = '' + try: + edit_side_effect(request.base_url, request.json, False) + except Exception as e: + reason = str(e) + success = False + return {'success': success, 'reason': reason} + + + + + +# import json +# from mock import requests +# import pyangbind.lib.pybindJSON as enc +# from pyangbind.lib.serialise import pybindJSONDecoder as dec +# from yang.sbi.qkd.templates.etsi_qkd_sdn_node import etsi_qkd_sdn_node + +# module = etsi_qkd_sdn_node() +# url = 'https://1.1.1.1/restconf/data/etsi-qkd-sdn-node:' + +# # Get node all info +# z = requests.get(url).json() +# var = dec.load_json(z, None, None, obj=module) +# print(enc.dumps(var)) + + +# Reset module variable because it is already filled +# module = etsi_qkd_sdn_node() + +# # Get node basic info +# node = module.qkd_node +# z = requests.get(url + 'qkd_node').json() +# var = dec.load_json(z, None, None, obj=node) +# print(enc.dumps(var)) + + +# # Get all apps +# apps = node.qkd_applications +# z = requests.get(url + 'qkd_node/qkd_applications').json() +# var = dec.load_json(z, None, None, obj=apps) +# print(enc.dumps(var)) + +# # Edit app 0 +# app = apps.qkd_app['00000000-0001-0000-0000-000000000000'] +# app.client_app_id = 'id_0' +# requests.put(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000000', json=json.loads(enc.dumps(app))) + +# # Create app 1 +# app = apps.qkd_app.add('00000000-0001-0000-0000-000000000001') +# requests.post(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000001', json=json.loads(enc.dumps(app))) + +# # Get all apps +# apps = node.qkd_applications +# z = requests.get(url + 'qkd_node/qkd_applications').json() +# var = dec.load_json(z, None, None, obj=apps) +# print(enc.dumps(var)) diff --git a/src/tests/tools/mock_qkd_nodes/start.sh b/src/tests/tools/mock_qkd_nodes/start.sh new file mode 100755 index 
0000000000000000000000000000000000000000..b1bc56d5a7f90809e81c73a54803fb2dc11bacd9 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/start.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cd "$(dirname "$0")" + +killbg() { + for p in "${pids[@]}" ; do + kill "$p"; + done +} + +trap killbg EXIT +pids=() +flask --app mock run --host 0.0.0.0 --port 11111 & +pids+=($!) +flask --app mock run --host 0.0.0.0 --port 22222 & +pids+=($!) +flask --app mock run --host 0.0.0.0 --port 33333 diff --git a/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang new file mode 100644 index 0000000000000000000000000000000000000000..04bbd8a875445a9bcf19266f21b792439bf9005c --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang @@ -0,0 +1,326 @@ +/* Copyright 2022 ETSI +Licensed under the BSD-3 Clause (https://forge.etsi.org/legal-matters) */ + +module etsi-qkd-node-types { + + yang-version "1"; + + namespace "urn:etsi:qkd:yang:etsi-qkd-node-types"; + + prefix "etsi-qkdn-types"; + + organization "ETSI ISG QKD"; + + contact + "https://www.etsi.org/committee/qkd + vicente@fi.upm.es"; + + description + "This module contains the base types created for + the software-defined QKD node information models + specified in ETSI GS QKD 015 V2.1.1 + - QKD-TECHNOLOGY-TYPES + - QKDN-STATUS-TYPES + - QKD-LINK-TYPES + - QKD-ROLE-TYPES + - QKD-APP-TYPES + - Wavelength + "; + + revision "2022-01-30" { + description + "Refinement of the YANG model to make it compatible with the ETSI ISG QKD 018. 
Minor fixes."; + } + + revision "2020-09-30" { + description + "First definition based on initial requirement analysis."; + } + + identity QKD-TECHNOLOGY-TYPES { + description "Quantum Key Distribution System base technology types."; + } + + identity CV-QKD { + base QKD-TECHNOLOGY-TYPES; + description "Continuous Variable base technology."; + } + + identity DV-QKD { + base QKD-TECHNOLOGY-TYPES; + description "Discrete Variable base technology."; + } + + identity DV-QKD-COW { + base QKD-TECHNOLOGY-TYPES; + description "COW base technology."; + } + + identity DV-QKD-2Ws { + base QKD-TECHNOLOGY-TYPES; + description "2-Ways base technology."; + } + + typedef qkd-technology-types { + type identityref { + base QKD-TECHNOLOGY-TYPES; + } + description "This type represents the base technology types of the SD-QKD system."; + } + + identity QKDN-STATUS-TYPES { + description "Base identity used to identify the SD-QKD node status."; + } + + identity NEW { + base QKDN-STATUS-TYPES; + description "The QKD node is installed."; + } + + identity OPERATING { + base QKDN-STATUS-TYPES; + description "The QKD node is up."; + } + + identity DOWN { + base QKDN-STATUS-TYPES; + description "The QKD node is not working as expected."; + } + + identity FAILURE { + base QKDN-STATUS-TYPES; + description "The QKD node cannot be accessed by SDN controller with communication failure."; + } + + identity OUT { + base QKDN-STATUS-TYPES; + description "The QKD node is switched off and uninstalled."; + } + + typedef qkdn-status-types { + type identityref { + base QKDN-STATUS-TYPES; + } + description "This type represents the status of the SD-QKD node."; + } + + identity QKD-LINK-TYPES { + description "QKD key association link types."; + } + + identity VIRT { + base QKD-LINK-TYPES; + description "Virtual Link."; + } + + identity PHYS { + base QKD-LINK-TYPES; + description "Physical Link."; + } + + typedef qkd-link-types { + type identityref { + base QKD-LINK-TYPES; + } + description "This type represents the key association link type between two SD-QKD nodes."; + } + + identity QKD-ROLE-TYPES { + description "QKD Role Type."; + } + + identity TRANSMITTER { + base QKD-ROLE-TYPES; + description "QKD module working as transmitter."; + } + + identity RECEIVER { + base QKD-ROLE-TYPES; + description "QKD module working as receiver."; + } + + identity TRANSCEIVER { + base QKD-ROLE-TYPES; + description "QKD System that can work as a transmitter or receiver."; + } + + typedef qkd-role-types { + type identityref { + base QKD-ROLE-TYPES; + } + description "This type represents the working mode of a SD-QKD module."; + } + + identity QKD-APP-TYPES { + description "Application types."; + } + + identity CLIENT { + base QKD-APP-TYPES; + description "Application working as client."; + } + + identity INTERNAL { + base QKD-APP-TYPES; + description "Internal QKD node application."; + } + + typedef qkd-app-types { + type identityref { + base QKD-APP-TYPES; + } + description "This type represents the application class consuming key from SD-QKD nodes."; + } + + identity PHYS-PERF-TYPES { + description "Physical performance types."; + } + + identity QBER { + base PHYS-PERF-TYPES; + description "Quantum Bit Error Rate."; + } + + identity SNR { + base PHYS-PERF-TYPES; + description "Signal to Noise Ratio."; + } + + typedef phys-perf-types { + type identityref { + base PHYS-PERF-TYPES; + } + description "This type represents physical performance types."; + } + + identity LINK-STATUS-TYPES { + description "Status of the key association QKD link (physical 
and virtual)."; + } + + identity ACTIVE { + base LINK-STATUS-TYPES; + description "Link actively generating keys."; + } + + identity PASSIVE { + base LINK-STATUS-TYPES; + description "No key generation on key association QKD link but a pool of keys + are still available."; + } + + identity PENDING { + base LINK-STATUS-TYPES; + description "Waiting for activation and no keys are available."; + } + + identity OFF { + base LINK-STATUS-TYPES; + description "No key generation and no keys are available."; + } + + typedef link-status-types { + type identityref { + base LINK-STATUS-TYPES; + } + description "This type represents the status of a key association QKD link, both physical and virtual."; + } + + /// + + identity IFACE-STATUS-TYPES { + description "Interface Status."; + } + + identity ENABLED { + base IFACE-STATUS-TYPES; + description "The interfaces is up."; + } + + identity DISABLED { + base IFACE-STATUS-TYPES; + description "The interfaces is down."; + } + + identity FAILED { + base IFACE-STATUS-TYPES; + description "The interfaces has failed."; + } + + typedef iface-status-types { + type identityref { + base IFACE-STATUS-TYPES; + } + description "This type represents the status of a interface between a SD-QKD node and a SD-QKD module."; + } + + identity APP-STATUS-TYPES { + description "Application types."; + } + + identity ON { + base APP-STATUS-TYPES; + description "The application is on."; + } + + identity DISCONNECTED { + base APP-STATUS-TYPES; + description "The application is disconnected."; + } + + identity OUT-OF-TIME { + base APP-STATUS-TYPES; + description "The application is out of time."; + } + + identity ZOMBIE { + base APP-STATUS-TYPES; + description "The application is in a zombie state."; + } + + typedef app-status-types { + type identityref { + base APP-STATUS-TYPES; + } + description "This type represents the status of an application consuming key from SD-QKD nodes."; + } + + identity SEVERITY-TYPES { + description "Error/Failure severity levels."; + } + + identity MAJOR { + base SEVERITY-TYPES; + description "Major error/failure."; + } + + identity MINOR { + base SEVERITY-TYPES; + description "Minor error/failure."; + } + + typedef severity-types { + type identityref { + base SEVERITY-TYPES; + } + description "This type represents the Error/Failure severity levels."; + } + + typedef wavelength { + type string { + pattern "([1-9][0-9]{0,3})"; + } + description + "A WDM channel number (starting at 1). For example: 20"; + } + + //Pattern from "A Yang Data Model for WSON Optical Networks". + typedef wavelength-range-type { + type string { + pattern "([1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?" + + "(,[1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?)*)"; + } + description + "A list of WDM channel numbers (starting at 1) + in ascending order. 
For example: 1,12-20,40,50-80"; + } +} diff --git a/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang new file mode 100644 index 0000000000000000000000000000000000000000..d07004cdc5b558adc5a9c0b6acb32adac0d7cc11 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang @@ -0,0 +1,941 @@ +/* Copyright 2022 ETSI +Licensed under the BSD-3 Clause (https://forge.etsi.org/legal-matters) */ + +module etsi-qkd-sdn-node { + + yang-version "1"; + + namespace "urn:etsi:qkd:yang:etsi-qkd-node"; + + prefix "etsi-qkdn"; + + import ietf-yang-types { prefix "yang"; } + import ietf-inet-types { prefix "inet"; } + import etsi-qkd-node-types { prefix "etsi-qkdn-types"; } + + // meta + organization "ETSI ISG QKD"; + + contact + "https://www.etsi.org/committee/qkd + vicente@fi.upm.es"; + + description + "This module contains the groupings and containers composing + the software-defined QKD node information models + specified in ETSI GS QKD 015 V2.1.1"; + + revision "2022-01-30" { + description + "Refinement of the YANG model to make it compatible with the ETSI ISG QKD 018. Minor fixes."; + reference + "ETSI GS QKD 015 V2.1.1 (2022-01)"; + } + + revision "2020-09-30" { + description + "First definition based on initial requirement analysis."; + reference + "ETSI GS QKD 015 V1.1.1 (2021-03)"; + } + + grouping qkdn_id { + description "Grouping of qkdn_id leaf."; + + leaf qkdn_id { + type yang:uuid; + mandatory true; + description + "This value reflects the unique ID of the SD-QKD node."; + } + } + + grouping qkdn_version { + description "Grouping of qkdn_version leaf."; + + leaf qkdn_version { + type string; + description "Hardware or software version of the SD-QKD node."; + } + } + + grouping qkdn_location_id { + description "Grouping of qkdn_location_id leaf."; + + leaf qkdn_location_id { + type string; + default ""; + description + "This value enables the location of the secure + area that contains the SD-QKD node to be specified."; + } + } + + grouping qkdn_status { + description "Grouping of qkdn_status leaf."; + + leaf qkdn_status { + type etsi-qkdn-types:qkdn-status-types; + config false; + description "Status of the SD-QKD node."; + } + } + + grouping qkdn_capabilities { + description "Grouping of the capabilities of the SD-QKD node."; + + container qkdn_capabilities { + description "Capabilities of the SD-QKD node."; + + leaf link_stats_support { + type boolean; + default true; + description + "If true, this node exposes link-related statistics (secure key + generation rate-SKR, link consumption, status, QBER)."; + } + + leaf application_stats_support { + type boolean; + default true; + description "If true, this node exposes application related + statistics (application consumption, alerts)."; + } + + leaf key_relay_mode_enable { + type boolean; + default true; + description "If true, this node supports key relay (multi-hop) mode services."; + } + } + } + + grouping app_id { + description "Grouping of app_id leaf."; + + leaf app_id { + type yang:uuid; + description + "Unique ID that identifies a QKD application consisting of a set of entities + that are allowed to receive keys shared with each other from the SD-QKD nodes + they connect to. 
This value is similar to a key ID or key handle."; + } + } + + grouping app_basic { + description "Grouping of app's basic parameters."; + + uses app_id; + + leaf app_status { + type etsi-qkdn-types:app-status-types; + config false; + description "Status of the application."; + } + } + + grouping app_priority { + description "Grouping of app_priority leaf."; + + leaf app_priority { + type uint32; + default 0; + description "Priority of the association/application + might be defined by the user but usually + handled by a network administrator."; + } + } + + grouping app_details { + description "Grouping of app's details parameters."; + + leaf app_type { + type etsi-qkdn-types:qkd-app-types; + description "Type of the registered application. These + values, defined within the types module, can be client + (if an external applications requesting keys) + or internal (application is defined to maintain + the QKD - e.g. multi-hop, authentication or + other encryption operations)."; + } + + leaf server_app_id { + type inet:uri; + description "ID that identifies the entity that initiated the + creation of the QKD application to receive keys shared with one + or more specified target entity identified by client_app_id. + It is a client in the interface to the SD-QKD node and the name + server_app_id reflects that it requested the QKD application to + be initiated."; + } + + leaf-list client_app_id { + type inet:uri; + description "List of IDs that identifies the one or more + entities that are allowed to receive keys from SD-QKD + node(s) under the QKD application in addition to the + initiating entity identified by server_app_id."; + } + + uses app_priority; + } + + grouping local_qkdn_id { + description "Grouping of local_qkdn_id leaf."; + + leaf local_qkdn_id { + type yang:uuid; + description "Unique ID of the local SD-QKD node which + is providing QKD keys to the local application."; + } + } + + grouping app_time { + description "Grouping of app's time parameters."; + + leaf creation_time { + type yang:date-and-time; + config false; + description "Date and time of the service creation."; + } + + leaf expiration_time { + type yang:date-and-time; + description "Date and time of the service expiration."; + } + } + + grouping app_statistics { + description "Grouping of app's statistic parameters."; + + container app_statistics { + description "Statistical information relating to a specific statistic period of time."; + + list statistics { + key "end_time"; + config false; + description "List of statistics."; + + leaf end_time { + type yang:date-and-time; + config false; + description "End time for the statistic period."; + } + + leaf start_time { + type yang:date-and-time; + config false; + description "Start time for the statistic period."; + } + + leaf consumed_bits { + type uint32; + config false; + description "Consumed secret key amount (in bits) for a statistics collection period of time."; + } + } + } + } + + grouping app_qos { + description "Grouping of app's basic qos parameters."; + + container app_qos { + description "Requested Quality of Service."; + + leaf max_bandwidth { + type uint32; + description "Maximum bandwidth (in bits per second) allowed for + this specific application. Exceeding this value will raise an + error from the local key store to the appl. 
This value might + be internally configured (or by an admin) with a default value."; + } + + leaf min_bandwidth { + type uint32; + description "This value is an optional QoS parameter which + enables to require a minimum key rate (in bits per second) + for the application."; + } + + leaf jitter { + type uint32; + description "This value allows to specify the maximum jitter + (in msec) to be provided by the key delivery API for + applications requiring fast rekeying. This value can be + coordinated with the other QoS to provide a wide enough + QoS definition."; + } + + leaf ttl { + type uint32; + description "This value is used to specify the maximum time + (in seconds) that a key could be kept in the key store for + a given application without being used."; + } + } + } + + grouping augmented_app_qos { + description "Grouping of app's detailed qos parameters."; + + uses app_qos { + augment app_qos { + description "Augmentation of app's basic parameters with app's detailed qos parameters."; + + leaf clients_shared_path_enable { + type boolean; + default false; + description "If true, multiple clients for this + application might share keys to reduce service + impact (consumption)."; + } + + leaf clients_shared_keys_required { + type boolean; + default false; + description "If true, multiple clients for this application + might share keys to reduce service impact (consumption)."; + } + } + } + } + + grouping qkd_applications { + description "Grouping of the list of applications container."; + + container qkd_applications { + description "List of applications container."; + + list qkd_app { + key "app_id"; + description "List of applications that are currently registered + in the SD-QKD node. Any entity consuming QKD-derived keys (either + for internal or external purposes) is considered an application."; + + uses app_basic; + + uses app_details; + + uses app_time; + + uses app_statistics; + + uses augmented_app_qos; + + leaf-list backing_qkdl_id { + type yang:uuid; + description "Unique ID of the key association link which is + providing QKD keys to these applications."; + } + + uses local_qkdn_id; + + leaf remote_qkdn_id { + type yang:uuid; + description "Unique ID of the remote SD-QKD node which + is providing QKD keys to the remote application. 
+ While unknown, the local SD-QKD will not be able to + provide keys to the local application."; + } + } + } + } + + grouping qkdi_status { + description "Grouping of qkdi_status leaf."; + + leaf qkdi_status { + type etsi-qkdn-types:iface-status-types; + config false; + description "Status of a QKD interface of the SD-QKD node."; + } + } + + grouping qkdi_model { + description "Grouping of qkdi_model leaf."; + + leaf qkdi_model { + type string; + description "Device model (vendor/device)."; + } + } + + grouping qkdi_type { + description "Grouping of qkdi_type leaf."; + + leaf qkdi_type { + type etsi-qkdn-types:qkd-technology-types; + description "Interface type (QKD technology)."; + } + } + + grouping qkdi_att_point { + description "Grouping of the interface attachment points to an optical switch."; + + container qkdi_att_point { + description "Interface attachment point to an optical switch."; + + leaf device { + type string; + description "Unique ID of the optical switch (or + passive component) to which the interface is connected."; + } + + leaf port { + type uint32; + description "Port ID of the device to which the interface + is connected."; + } + } + } + + grouping qkdi_id { + description "Grouping of qkdi_id leaf."; + + leaf qkdi_id { + type uint32; + description "Interface id. It is described as a locally unique number, + which is globally unique when combined with the SD-QKD node ID."; + } + } + + grouping qkd_interface_item { + description "Grouping of the interface parameters."; + + uses qkdi_id; + + uses qkdi_model; + + uses qkdi_type; + + uses qkdi_att_point; + + container qkdi_capabilities { + description "Capabilities of the QKD system (interface)."; + + leaf role_support { + type etsi-qkdn-types:qkd-role-types; + description "QKD node support for key relay mode services."; + } + + leaf wavelength_range { + type etsi-qkdn-types:wavelength-range-type; + description "Range of supported wavelengths (nm) (multiple + if it contains a tunable laser)."; + } + + leaf max_absorption { + type decimal64 { + fraction-digits 3; + } + description "Maximum absorption supported (in dB)."; + } + } + } + + grouping qkd_interfaces { + description "Grouping of the list of interfaces."; + + container qkd_interfaces { + description "List of interfaces container."; + + list qkd_interface { + key "qkdi_id"; + description "List of physical QKD modules in a secure location, + abstracted as interfaces of the SD-QKD node."; + + uses qkd_interface_item; + + uses qkdi_status; + + } + } + } + + grouping qkdl_id { + description "Grouping of qkdl_id leaf."; + + leaf qkdl_id { + type yang:uuid; + description "Unique ID of the QKD link (key association)."; + } + } + + grouping qkdl_status { + description "Grouping of qkdl_status leaf."; + + leaf qkdl_status { + type etsi-qkdn-types:link-status-types; + description "Status of the QKD key association link."; + } + } + + grouping common_performance { + description "Grouping of common performance parameters."; + + leaf expected_consumption { + type uint32; + config false; + description "Sum of all the application's bandwidth (in bits per + second) on this particular key association link."; + } + + leaf skr { + type uint32; + config false; + description "Secret key rate generation (in bits per second) + of the key association link."; + } + + leaf eskr { + type uint32; + config false; + description "Effective secret key rate (in bits per second) generation + of the key association link available after internal consumption."; + } + } + + grouping physical_link_perf 
{ + description "Grouping of the list of physical performance parameters."; + + list phys_perf { + key "perf_type"; + config false; + description "List of physical performance parameters."; + + leaf perf_type { + type etsi-qkdn-types:phys-perf-types; + config false; + description "Type of the physical performance value to be + exposed to the controller."; + } + + leaf value { + type decimal64 { + fraction-digits 3; + } + config false; + description "Numerical value for the performance parameter + type specified above."; + } + } + } + + grouping virtual_link_spec { + description "Grouping of the virtual link's parameters."; + + leaf virt_prev_hop { + type yang:uuid; + description "Previous hop in a multi-hop/virtual key + association link config."; + } + + leaf-list virt_next_hop { + type yang:uuid; + description "Next hop(s) in a multihop/virtual key + association link config. Defined as a list for multicast + over shared sub-paths."; + } + + leaf virt_bandwidth { + type uint32; + description "Required bandwidth (in bits per second) for that key association link. + Used to reserve bandwidth from the physical QKD links to support the virtual key + association link as an internal application."; + } + } + + grouping physical_link_spec { + description "Grouping of the physical link's parameters."; + + leaf phys_channel_att { + type decimal64 { + fraction-digits 3; + } + description "Expected attenuation on the quantum channel (in dB) + between the Source/qkd_node and Destination/qkd_node."; + + } + + leaf phys_wavelength { + type etsi-qkdn-types:wavelength; + description "Wavelength (in nm) to be used for the quantum channel. + If the interface is not tunable, this configuration could be bypassed"; + } + + leaf phys_qkd_role { + type etsi-qkdn-types:qkd-role-types; + description "Transmitter/receiver mode for the QKD module. + If there is no multi-role support, this could be ignored."; + } + } + + grouping qkd_links { + description "Grouping of the list of links."; + + container qkd_links { + description "List of links container"; + + list qkd_link { + key "qkdl_id"; + description "List of (key association) links to other SD-QKD nodes in the network. + The links can be physical (direct quantum channel) or virtual multi-hop + connection doing key-relay through several nodes."; + + uses qkdl_id; + + uses qkdl_status; + + leaf qkdl_enable { + type boolean; + default true; + description "This value allows to enable of disable the key generation + process for a given link."; + + } + + container qkdl_local { + description "Source (local) node of the SD-QKD link."; + + leaf qkdn_id { + type yang:uuid; + description "Unique ID of the local SD-QKD node."; + } + + leaf qkdi_id { + type uint32; + description "Interface used to create the key association link."; + } + } + + container qkdl_remote { + description "Destination (remote) unique SD-QKD node."; + + leaf qkdn_id { + type yang:uuid; + description "Unique ID of the remote SD-QKD node. 
This value is + provided by the SDN controller when the key association link + request arrives."; + } + + leaf qkdi_id { + type uint32; + description "Interface used to create the link."; + } + } + + leaf qkdl_type { + type etsi-qkdn-types:qkd-link-types; + description "Key Association Link type: Virtual (multi-hop) or Direct."; + } + + leaf-list qkdl_applications { + type yang:uuid; + description "Applications which are consuming keys from + this key association link."; + } + + uses virtual_link_spec { + when "qkdl_type = 'etsi-qkd-node-types:VIRT'" { + description "Virtual key association link specific configuration."; + } + } + + uses physical_link_spec { + when "qkdl_type = 'etsi-qkd-node-types:PHYS'" { + description "Physical key association link specific configuration."; + } + } + + container qkdl_performance { + description "Container of link's performace parameters."; + + uses common_performance; + + uses physical_link_perf { + when "../qkdl_type = 'PHYS'" { + description "Performance of the specific physical link."; + } + } + } + } + } + } + + container qkd_node { + description + "Top module describing a software-defined QKD node (SD-QKD node)."; + + uses qkdn_id; + + uses qkdn_status; + + uses qkdn_version; + + uses qkdn_location_id; + + uses qkdn_capabilities; + + uses qkd_applications; + + uses qkd_interfaces; + + uses qkd_links; + } + + grouping message { + description "Grouping of message leaf."; + + leaf message { + type string; + description "Placeholder for the message."; + } + } + + grouping severity { + description "Grouping of severity leaf."; + + leaf severity { + type etsi-qkdn-types:severity-types; + description "Placeholder for the severity."; + } + } + + grouping reason { + description "Grouping of reason leaf."; + + leaf reason { + type string; + description "Auxiliary parameter to include additional + information about the reason for link failure."; + } + } + + notification sdqkdn_application_new { + description "Defined for the controller to detect new applications + requesting keys from a QKD node. This maps with the workflow shown + in clause 5.2 'QKD Application Registration'. 
Parameters such as + client and server app IDs, local QKD node identifier, priority and + QoS are sent in the notification."; + + container qkd_application { + description "'sdqkdn_application_new' notification's qkd_application parameters."; + + uses app_details; + + uses local_qkdn_id; + + uses augmented_app_qos; + + } + } + + notification sdqkdn_application_qos_update { + description "Notification that includes information about priority or + QoS changes on an existing and already registered application."; + + container qkd_application { + description "'sdqkdn_application_qos_update' notification's qkd_application parameters."; + + uses app_id; + + uses augmented_app_qos; + + uses app_priority; + + } + } + + notification sdqkdn_application_disconnected { + description "Includes the application identifier to inform that the + application is no longer registered and active in the QKD node."; + + container qkd_application { + description "'sdqkdn_application_disconnected' notification's qkd_application parameters."; + + uses app_id; + + } + } + + notification sdqkdn_interface_new { + description "Includes all the information about the new QKD system + installed in the secure location of a given QKD node."; + + container qkd_interface { + description "'sdqkdn_interface_new' notification's qkd_interface parameters."; + + uses qkd_interface_item; + + } + } + + notification sdqkdn_interface_down { + description "Identifies an interface within a QKD node which is not + working as expected, allowing additional information to be included + in a 'reason' string field."; + + container qkd_interface { + description "'sdqkdn_interface_down' notification's qkd_interface parameters."; + + uses qkdi_id; + + uses reason; + + } + } + + notification sdqkdn_interface_out { + description "Contains the ID of an interface which is switch off and + uninstall from a QKD node. This information can be gathered from this + notification or from regular polling from the controller's side."; + + container qkd_interface { + description "'sdqkdn_interface_out' notification's qkd_interface parameters."; + + uses qkdi_id; + + } + } + + notification sdqkdn_link_down { + description "As in the interface down event, this notification contains + the identifier of a given link which has gone down unexpectedly. + In addition, further information can be sent in the 'reason' field."; + + container qkd_link { + description "'sdqkdn_link_down' notification's qkd_link parameters."; + + uses qkdl_id; + + uses reason; + + } + } + + notification sdqkdn_link_perf_update { + description "This notification allows to inform of any mayor + modification in the performance of an active link. The identifier + of the link is sent together with the performance parameters of the link."; + + container qkd_link { + description "'sdqkdn_link_perf_update' notification's qkd_link parameters."; + + uses qkdl_id; + + container performance { + description "'sdqkdn_link_perf_update' notification's performance parameters."; + + uses common_performance; + + uses physical_link_perf; + + } + } + } + + notification sdqkdn_link_overloaded { + description "This notification is sent when the link cannot cope with the + demand. 
The link identifier is sent with the expected consumption and + general performance parameters."; + + container qkd_link { + description "'sdqkdn_link_overloaded' notification's qkd_link parameters."; + + uses qkdl_id; + + container performance { + description "'sdqkdn_link_overloaded' notification's performance parameters."; + + uses common_performance; + + } + } + } + + notification alarm { + description "'alarm' notification."; + + container link { + description "'alarm' notification's link parameters."; + + uses qkdl_id; + + uses qkdl_status; + + uses message; + + uses severity; + + } + + container interface { + description "'alarm' notification's interface parameters."; + + uses qkdi_id; + + uses qkdi_status; + + uses message; + + uses severity; + + } + + container application { + description "'alarm' notification's application parameters."; + + uses app_basic; + + uses message; + + uses severity; + + } + + } + + notification event { + description "'event' notification."; + + container link { + description "'alarm' notification's link parameters."; + + uses qkdl_id; + + uses qkdl_status; + + uses message; + + uses severity; + + } + + container interface { + description "'alarm' notification's interface parameters."; + + uses qkdi_id; + + uses qkdi_status; + + uses message; + + uses severity; + + } + + container application { + description "'alarm' notification's application parameters."; + + uses app_basic; + + uses message; + + uses severity; + + } + + } + +} diff --git a/src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang b/src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang new file mode 100644 index 0000000000000000000000000000000000000000..eacefb6363de1beb543567a0fa705571b7dc57a2 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang @@ -0,0 +1,458 @@ +module ietf-inet-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types"; + prefix "inet"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: <http://tools.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + WG Chair: David Kessens + <mailto:david.kessens@nsn.com> + + WG Chair: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de> + + Editor: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de>"; + + description + "This module contains a collection of generally useful derived + YANG data types for Internet addresses and related things. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - ip-address-no-zone + - ipv4-address-no-zone + - ipv6-address-no-zone"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of types related to protocol fields ***/ + + typedef ip-version { + type enumeration { + enum unknown { + value "0"; + description + "An unknown or unspecified version of the Internet + protocol."; + } + enum ipv4 { + value "1"; + description + "The IPv4 protocol as defined in RFC 791."; + } + enum ipv6 { + value "2"; + description + "The IPv6 protocol as defined in RFC 2460."; + } + } + description + "This value represents the version of the IP protocol. + + In the value set and its semantics, this type is equivalent + to the InetVersion textual convention of the SMIv2."; + reference + "RFC 791: Internet Protocol + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + typedef dscp { + type uint8 { + range "0..63"; + } + description + "The dscp type represents a Differentiated Services Code Point + that may be used for marking packets in a traffic stream. + In the value set and its semantics, this type is equivalent + to the Dscp textual convention of the SMIv2."; + reference + "RFC 3289: Management Information Base for the Differentiated + Services Architecture + RFC 2474: Definition of the Differentiated Services Field + (DS Field) in the IPv4 and IPv6 Headers + RFC 2780: IANA Allocation Guidelines For Values In + the Internet Protocol and Related Headers"; + } + + typedef ipv6-flow-label { + type uint32 { + range "0..1048575"; + } + description + "The ipv6-flow-label type represents the flow identifier or Flow + Label in an IPv6 packet header that may be used to + discriminate traffic flows. + + In the value set and its semantics, this type is equivalent + to the IPv6FlowLabel textual convention of the SMIv2."; + reference + "RFC 3595: Textual Conventions for IPv6 Flow Label + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification"; + } + + typedef port-number { + type uint16 { + range "0..65535"; + } + description + "The port-number type represents a 16-bit port number of an + Internet transport-layer protocol such as UDP, TCP, DCCP, or + SCTP. Port numbers are assigned by IANA. A current list of + all assignments is available from <http://www.iana.org/>. + + Note that the port number value zero is reserved by IANA. In + situations where the value zero does not make sense, it can + be excluded by subtyping the port-number type. + In the value set and its semantics, this type is equivalent + to the InetPortNumber textual convention of the SMIv2."; + reference + "RFC 768: User Datagram Protocol + RFC 793: Transmission Control Protocol + RFC 4960: Stream Control Transmission Protocol + RFC 4340: Datagram Congestion Control Protocol (DCCP) + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + /*** collection of types related to autonomous systems ***/ + + typedef as-number { + type uint32; + description + "The as-number type represents autonomous system numbers + which identify an Autonomous System (AS). 
An AS is a set + of routers under a single technical administration, using + an interior gateway protocol and common metrics to route + packets within the AS, and using an exterior gateway + protocol to route packets to other ASes. IANA maintains + the AS number space and has delegated large parts to the + regional registries. + + Autonomous system numbers were originally limited to 16 + bits. BGP extensions have enlarged the autonomous system + number space to 32 bits. This type therefore uses an uint32 + base type without a range restriction in order to support + a larger autonomous system number space. + + In the value set and its semantics, this type is equivalent + to the InetAutonomousSystemNumber textual convention of + the SMIv2."; + reference + "RFC 1930: Guidelines for creation, selection, and registration + of an Autonomous System (AS) + RFC 4271: A Border Gateway Protocol 4 (BGP-4) + RFC 4001: Textual Conventions for Internet Network Addresses + RFC 6793: BGP Support for Four-Octet Autonomous System (AS) + Number Space"; + } + + /*** collection of types related to IP addresses and hostnames ***/ + + typedef ip-address { + type union { + type inet:ipv4-address; + type inet:ipv6-address; + } + description + "The ip-address type represents an IP address and is IP + version neutral. The format of the textual representation + implies the IP version. This type supports scoped addresses + by allowing zone identifiers in the address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '(%[\p{N}\p{L}]+)?'; + } + description + "The ipv4-address type represents an IPv4 address in + dotted-quad notation. The IPv4 address may include a zone + index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format for the zone index is the numerical + format"; + } + + typedef ipv6-address { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(%[\p{N}\p{L}]+)?'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(%.+)?'; + } + description + "The ipv6-address type represents an IPv6 address in full, + mixed, shortened, and shortened-mixed notation. The IPv6 + address may include a zone index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format of IPv6 addresses uses the textual + representation defined in Section 4 of RFC 5952. 
The + canonical format for the zone index is the numerical + format as described in Section 11.2 of RFC 4007."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-address-no-zone { + type union { + type inet:ipv4-address-no-zone; + type inet:ipv6-address-no-zone; + } + description + "The ip-address-no-zone type represents an IP address and is + IP version neutral. The format of the textual representation + implies the IP version. This type does not support scoped + addresses since it does not allow zone identifiers in the + address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address-no-zone { + type inet:ipv4-address { + pattern '[0-9\.]*'; + } + description + "An IPv4 address without a zone index. This type, derived from + ipv4-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + } + + typedef ipv6-address-no-zone { + type inet:ipv6-address { + pattern '[0-9a-fA-F:\.]*'; + } + description + "An IPv6 address without a zone index. This type, derived from + ipv6-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-prefix { + type union { + type inet:ipv4-prefix; + type inet:ipv6-prefix; + } + description + "The ip-prefix type represents an IP prefix and is IP + version neutral. The format of the textual representations + implies the IP version."; + } + + typedef ipv4-prefix { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '/(([0-9])|([1-2][0-9])|(3[0-2]))'; + } + description + "The ipv4-prefix type represents an IPv4 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 32. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The canonical format of an IPv4 prefix has all bits of + the IPv4 address set to zero that are not part of the + IPv4 prefix."; + } + + typedef ipv6-prefix { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(/.+)'; + } + + description + "The ipv6-prefix type represents an IPv6 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 128. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The IPv6 address should have all bits that do not belong + to the prefix set to zero. + + The canonical format of an IPv6 prefix has all bits of + the IPv6 address set to zero that are not part of the + IPv6 prefix. 
Furthermore, the IPv6 address is represented + as defined in Section 4 of RFC 5952."; + reference + "RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + /*** collection of domain name and URI types ***/ + + typedef domain-name { + type string { + pattern + '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*' + + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)' + + '|\.'; + length "1..253"; + } + description + "The domain-name type represents a DNS domain name. The + name SHOULD be fully qualified whenever possible. + + Internet domain names are only loosely specified. Section + 3.5 of RFC 1034 recommends a syntax (modified in Section + 2.1 of RFC 1123). The pattern above is intended to allow + for current practice in domain name use, and some possible + future expansion. It is designed to hold various types of + domain names, including names used for A or AAAA records + (host names) and other records, such as SRV records. Note + that Internet host names have a stricter syntax (described + in RFC 952) than the DNS recommendations in RFCs 1034 and + 1123, and that systems that want to store host names in + schema nodes using the domain-name type are recommended to + adhere to this stricter standard to ensure interoperability. + + The encoding of DNS names in the DNS protocol is limited + to 255 characters. Since the encoding consists of labels + prefixed by a length bytes and there is a trailing NULL + byte, only 253 characters can appear in the textual dotted + notation. + + The description clause of schema nodes using the domain-name + type MUST describe when and how these names are resolved to + IP addresses. Note that the resolution of a domain-name value + may require to query multiple DNS records (e.g., A for IPv4 + and AAAA for IPv6). The order of the resolution process and + which DNS record takes precedence can either be defined + explicitly or may depend on the configuration of the + resolver. + + Domain-name values use the US-ASCII encoding. Their canonical + format uses lowercase US-ASCII characters. Internationalized + domain names MUST be A-labels as per RFC 5890."; + reference + "RFC 952: DoD Internet Host Table Specification + RFC 1034: Domain Names - Concepts and Facilities + RFC 1123: Requirements for Internet Hosts -- Application + and Support + RFC 2782: A DNS RR for specifying the location of services + (DNS SRV) + RFC 5890: Internationalized Domain Names in Applications + (IDNA): Definitions and Document Framework"; + } + + typedef host { + type union { + type inet:ip-address; + type inet:domain-name; + } + description + "The host type represents either an IP address or a DNS + domain name."; + } + + typedef uri { + type string; + description + "The uri type represents a Uniform Resource Identifier + (URI) as defined by STD 66. + + Objects using the uri type MUST be in US-ASCII encoding, + and MUST be normalized as described by RFC 3986 Sections + 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary + percent-encoding is removed, and all case-insensitive + characters are set to lowercase except for hexadecimal + digits, which are normalized to uppercase as described in + Section 6.2.2.1. + + The purpose of this normalization is to help provide + unique URIs. Note that this normalization is not + sufficient to provide uniqueness. Two URIs that are + textually distinct after this normalization may still be + equivalent. + + Objects using the uri type may restrict the schemes that + they permit. 
For example, 'data:' and 'urn:' schemes + might not be appropriate. + + A zero-length URI is not a valid URI. This can be used to + express 'URI absent' where required. + + In the value set and its semantics, this type is equivalent + to the Uri SMIv2 textual convention defined in RFC 5017."; + reference + "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax + RFC 3305: Report from the Joint W3C/IETF URI Planning Interest + Group: Uniform Resource Identifiers (URIs), URLs, + and Uniform Resource Names (URNs): Clarifications + and Recommendations + RFC 5017: MIB Textual Conventions for Uniform Resource + Identifiers (URIs)"; + } + +} diff --git a/src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang b/src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang new file mode 100644 index 0000000000000000000000000000000000000000..ee58fa3ab0042120d5607b8713d21fa0ba845895 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang @@ -0,0 +1,474 @@ +module ietf-yang-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types"; + prefix "yang"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: <http://tools.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + WG Chair: David Kessens + <mailto:david.kessens@nsn.com> + + WG Chair: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de> + + Editor: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de>"; + + description + "This module contains a collection of generally useful derived + YANG data types. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - yang-identifier + - hex-string + - uuid + - dotted-quad"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of counter and gauge types ***/ + + typedef counter32 { + type uint32; + description + "The counter32 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter32 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter32 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter32. 
+ + In the value set and its semantics, this type is equivalent + to the Counter32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter32 { + type yang:counter32; + default "0"; + description + "The zero-based-counter32 type represents a counter32 + that has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter32 textual convention of the SMIv2."; + reference + "RFC 4502: Remote Network Monitoring Management Information + Base Version 2"; + } + + typedef counter64 { + type uint64; + description + "The counter64 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter64 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter64 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter64. + + In the value set and its semantics, this type is equivalent + to the Counter64 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter64 { + type yang:counter64; + default "0"; + description + "The zero-based-counter64 type represents a counter64 that + has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. 
+ + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter64 textual convention of the SMIv2."; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + typedef gauge32 { + type uint32; + description + "The gauge32 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^32-1 (4294967295 decimal), and + the minimum value cannot be smaller than 0. The value of + a gauge32 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge32 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the Gauge32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef gauge64 { + type uint64; + description + "The gauge64 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^64-1 (18446744073709551615), and + the minimum value cannot be smaller than 0. The value of + a gauge64 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge64 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the CounterBasedGauge64 SMIv2 textual convention defined + in RFC 2856"; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + /*** collection of identifier-related types ***/ + + typedef object-identifier { + type string { + pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))' + + '(\.(0|([1-9]\d*)))*'; + } + description + "The object-identifier type represents administratively + assigned names in a registration-hierarchical-name tree. + + Values of this type are denoted as a sequence of numerical + non-negative sub-identifier values. Each sub-identifier + value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers + are separated by single dots and without any intermediate + whitespace. + + The ASN.1 standard restricts the value space of the first + sub-identifier to 0, 1, or 2. Furthermore, the value space + of the second sub-identifier is restricted to the range + 0 to 39 if the first sub-identifier is 0 or 1. Finally, + the ASN.1 standard requires that an object identifier + has always at least two sub-identifiers. The pattern + captures these restrictions. + + Although the number of sub-identifiers is not limited, + module designers should realize that there may be + implementations that stick with the SMIv2 limit of 128 + sub-identifiers. + + This type is a superset of the SMIv2 OBJECT IDENTIFIER type + since it is not restricted to 128 sub-identifiers. 
Hence, + this type SHOULD NOT be used to represent the SMIv2 OBJECT + IDENTIFIER type; the object-identifier-128 type SHOULD be + used instead."; + reference + "ISO9834-1: Information technology -- Open Systems + Interconnection -- Procedures for the operation of OSI + Registration Authorities: General procedures and top + arcs of the ASN.1 Object Identifier tree"; + } + + typedef object-identifier-128 { + type object-identifier { + pattern '\d*(\.\d*){1,127}'; + } + description + "This type represents object-identifiers restricted to 128 + sub-identifiers. + + In the value set and its semantics, this type is equivalent + to the OBJECT IDENTIFIER type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef yang-identifier { + type string { + length "1..max"; + pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*'; + pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*'; + } + description + "A YANG identifier string as defined by the 'identifier' + rule in Section 12 of RFC 6020. An identifier must + start with an alphabetic character or an underscore + followed by an arbitrary sequence of alphabetic or + numeric characters, underscores, hyphens, or dots. + + A YANG identifier MUST NOT start with any possible + combination of the lowercase or uppercase character + sequence 'xml'."; + reference + "RFC 6020: YANG - A Data Modeling Language for the Network + Configuration Protocol (NETCONF)"; + } + + /*** collection of types related to date and time***/ + + typedef date-and-time { + type string { + pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?' + + '(Z|[\+\-]\d{2}:\d{2})'; + } + description + "The date-and-time type is a profile of the ISO 8601 + standard for representation of dates and times using the + Gregorian calendar. The profile is defined by the + date-time production in Section 5.6 of RFC 3339. + + The date-and-time type is compatible with the dateTime XML + schema type with the following notable exceptions: + + (a) The date-and-time type does not allow negative years. + + (b) The date-and-time time-offset -00:00 indicates an unknown + time zone (see RFC 3339) while -00:00 and +00:00 and Z + all represent the same time zone in dateTime. + + (c) The canonical format (see below) of data-and-time values + differs from the canonical format used by the dateTime XML + schema type, which requires all times to be in UTC using + the time-offset 'Z'. + + This type is not equivalent to the DateAndTime textual + convention of the SMIv2 since RFC 3339 uses a different + separator between full-date and full-time and provides + higher resolution of time-secfrac. + + The canonical format for date-and-time values with a known time + zone uses a numeric time zone offset that is calculated using + the device's configured known offset to UTC time. A change of + the device's offset to UTC time will cause date-and-time values + to change accordingly. Such changes might happen periodically + in case a server follows automatically daylight saving time + (DST) time zone offset changes. 
The canonical format for + date-and-time values with an unknown time zone (usually + referring to the notion of local time) uses the time-offset + -00:00."; + reference + "RFC 3339: Date and Time on the Internet: Timestamps + RFC 2579: Textual Conventions for SMIv2 + XSD-TYPES: XML Schema Part 2: Datatypes Second Edition"; + } + + typedef timeticks { + type uint32; + description + "The timeticks type represents a non-negative integer that + represents the time, modulo 2^32 (4294967296 decimal), in + hundredths of a second between two epochs. When a schema + node is defined that uses this type, the description of + the schema node identifies both of the reference epochs. + + In the value set and its semantics, this type is equivalent + to the TimeTicks type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef timestamp { + type yang:timeticks; + description + "The timestamp type represents the value of an associated + timeticks schema node at which a specific occurrence + happened. The specific occurrence must be defined in the + description of any schema node defined using this type. When + the specific occurrence occurred prior to the last time the + associated timeticks attribute was zero, then the timestamp + value is zero. Note that this requires all timestamp values + to be reset to zero when the value of the associated timeticks + attribute reaches 497+ days and wraps around to zero. + + The associated timeticks schema node must be specified + in the description of any schema node using this type. + + In the value set and its semantics, this type is equivalent + to the TimeStamp textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of generic address types ***/ + + typedef phys-address { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "Represents media- or physical-level addresses represented + as a sequence octets, each octet represented by two hexadecimal + numbers. Octets are separated by colons. The canonical + representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the PhysAddress textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + typedef mac-address { + type string { + pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'; + } + description + "The mac-address type represents an IEEE 802 MAC address. + The canonical representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the MacAddress textual convention of the SMIv2."; + reference + "IEEE 802: IEEE Standard for Local and Metropolitan Area + Networks: Overview and Architecture + RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of XML-specific types ***/ + + typedef xpath1.0 { + type string; + description + "This type represents an XPATH 1.0 expression. + + When a schema node is defined that uses this type, the + description of the schema node MUST specify the XPath + context in which the XPath expression is evaluated."; + reference + "XPATH: XML Path Language (XPath) Version 1.0"; + } + + /*** collection of string types ***/ + + typedef hex-string { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + description + "A hexadecimal string with octets represented as hex digits + separated by colons. 
The canonical representation uses + lowercase characters."; + } + + typedef uuid { + type string { + pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'; + } + description + "A Universally Unique IDentifier in the string representation + defined in RFC 4122. The canonical representation uses + lowercase characters. + + The following is an example of a UUID in string representation: + f81d4fae-7dec-11d0-a765-00a0c91e6bf6 + "; + reference + "RFC 4122: A Universally Unique IDentifier (UUID) URN + Namespace"; + } + + typedef dotted-quad { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'; + } + description + "An unsigned 32-bit number expressed in the dotted-quad + notation, i.e., four octets written as decimal numbers + and separated with the '.' (full stop) character."; + } +}
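Usage note: the two vendored files above are pure typedef collections (RFC 6991); nothing in this change uses them directly yet, they are only imported by the mock QKD node data models. As a minimal sketch of how such a consumer module would pick up these types, assuming a hypothetical module name, prefix, and leaf names that are not part of this change:

module example-qkd-mock-status {
  namespace "urn:example:qkd-mock-status";
  prefix eqms;

  // Prefixes "inet" and "yang" refer to the two vendored modules above.
  import ietf-inet-types { prefix inet; }
  import ietf-yang-types { prefix yang; }

  description
    "Illustrative consumer of the vendored common typedefs;
     not part of the mock QKD node models.";

  container status {
    description "Operational status of a mock QKD node.";
    leaf node-id {
      type yang:uuid;
      description "Node identifier in RFC 4122 string form.";
    }
    leaf management-host {
      type inet:host;
      description "IP address or DNS domain name of the management endpoint.";
    }
    leaf last-change {
      type yang:date-and-time;
      description "RFC 3339 timestamp of the last status change.";
    }
    leaf tx-key-blocks {
      type yang:counter64;
      config false;
      description "Monotonically increasing count of emitted key blocks.";
    }
  }
}

Marking the counter64 leaf as "config false" follows the typedef's own guidance that counters are operational data and should not be used for configuration schema nodes.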