diff --git a/.gitignore b/.gitignore index 20b98c30c5b3edb0983578b0a5f74fb1c1f3025e..e1f87cfd3842c264bd219237e9afe113d61c35bc 100644 --- a/.gitignore +++ b/.gitignore @@ -176,3 +176,6 @@ libyang/ # Other logs **/logs/*.log.* + +# PySpark checkpoints +src/analytics/.spark/* diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e2d653e0360b694891adc966d6d0b1124ed72ac4..115b336761dd94902597c3b6e21e7d3dcf225af1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -45,6 +45,9 @@ include: #- local: '/src/dlt/.gitlab-ci.yml' - local: '/src/load_generator/.gitlab-ci.yml' - local: '/src/bgpls_speaker/.gitlab-ci.yml' - + - local: '/src/kpi_manager/.gitlab-ci.yml' + - local: '/src/kpi_value_api/.gitlab-ci.yml' + - local: '/src/kpi_value_writer/.gitlab-ci.yml' + - local: '/src/telemetry/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/deploy/all.sh b/deploy/all.sh index c169bc92c0d9a6dea87de919ad20b4cf3afc1199..06b8ee701530f56381080879d0e2941b664e5197 100755 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -27,7 +27,47 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy. # By default, only basic components are deployed -export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"} +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"} + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" # If not already set, set the tag you want to use for your images. export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} @@ -67,8 +107,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'. 
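Taken together, the settings above mean a deployment is customized simply by exporting variables before invoking the scripts. As a minimal sketch (not part of the patch; component names and values are taken from the defaults and comments above):

```bash
# Sketch: enable the new Monitoring Framework on top of the default components,
# keeping the single-node CockroachDB that fits development VMs.
export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics"
export CRDB_DEPLOY_MODE="single"   # accepted values: 'single' and 'cluster'
./deploy/all.sh
```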
-# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while -# checking/deploying CockroachDB. # - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for # development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS. # - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster @@ -80,7 +118,7 @@ export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"} # If not already set, disable flag for dropping database, if it exists. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! -# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while +# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while # checking/deploying CockroachDB. export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""} @@ -102,6 +140,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"} # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to. export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"} +# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'. +# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for +# development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS. +# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster +# with 3 replicas (set by default) will be deployed. It is convenient for production and +# provides scalability features. +export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"} + # If not already set, disable flag for re-deploying NATS from scratch. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION! # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS. @@ -137,7 +183,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"} # If not already set, disable flag for dropping tables if they exist. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION! # If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables -# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while +# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while # checking/deploying QuestDB. export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""} @@ -169,6 +215,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} # Deploy QuestDB ./deploy/qdb.sh +# Deploy Apache Kafka +./deploy/kafka.sh + # Expose Dashboard ./deploy/expose_dashboard.sh diff --git a/deploy/crdb.sh b/deploy/crdb.sh index c979ad4f2c18861c6a93b6b04e5d8e3e71aae41e..3e80b6350e66ec30a725c45acb7cf954ac3009c8 100755 --- a/deploy/crdb.sh +++ b/deploy/crdb.sh @@ -37,8 +37,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'. -# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while -# checking/deploying CockroachDB. # - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for # development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS. 
# - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster @@ -48,7 +46,7 @@ export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} # Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"} -# If not already set, disable flag for dropping database if exists. +# If not already set, disable flag for dropping database, if it exists. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! # If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while # checking/deploying CockroachDB. @@ -79,7 +77,7 @@ function crdb_deploy_single() { kubectl create namespace ${CRDB_NAMESPACE} echo - echo "CockroachDB (single-node)" + echo "CockroachDB (single-mode)" echo ">>> Checking if CockroachDB is deployed..." if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then echo ">>> CockroachDB is present; skipping step." @@ -139,7 +137,7 @@ function crdb_deploy_single() { } function crdb_undeploy_single() { - echo "CockroachDB" + echo "CockroachDB (single-mode)" echo ">>> Checking if CockroachDB is deployed..." if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then echo ">>> Undeploy CockroachDB" @@ -223,7 +221,7 @@ function crdb_deploy_cluster() { kubectl create namespace ${CRDB_NAMESPACE} echo - echo "CockroachDB" + echo "CockroachDB (cluster-mode)" echo ">>> Checking if CockroachDB is deployed..." if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then echo ">>> CockroachDB is present; skipping step." @@ -319,7 +317,7 @@ function crdb_undeploy_cluster() { fi echo - echo "CockroachDB" + echo "CockroachDB (cluster-mode)" echo ">>> Checking if CockroachDB is deployed..." if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then echo ">>> Undeploy CockroachDB" diff --git a/deploy/kafka.sh b/deploy/kafka.sh new file mode 100755 index 0000000000000000000000000000000000000000..0483bce153b457800c6f7db2ef66685e90118111 --- /dev/null +++ b/deploy/kafka.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + +# If not already set, set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} + +# If not already set, set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"} + +# If not already set, if flag is YES, Apache Kafka will be redeployed and all topics will be lost. 
+export KFK_REDEPLOY=${KFK_REDEPLOY:-""} + + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + + # Constants + TMP_FOLDER="./tmp" + KFK_MANIFESTS_PATH="manifests/kafka" + KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml" + KFK_MANIFEST="02-kafka.yaml" + + # Create a tmp folder for files modified during the deployment + TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests" + mkdir -p ${TMP_MANIFESTS_FOLDER} + +function kafka_deploy() { + # copy zookeeper and kafka manifest files to temporary manifest location + cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" + cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}" + + # echo "Apache Kafka Namespace" + echo ">>> Delete Apache Kafka Namespace" + kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found + + echo ">>> Create Apache Kafka Namespace" + kubectl create namespace ${KFK_NAMESPACE} + + # echo ">>> Deploying Apache Kafka Zookeeper" + # Kafka zookeeper service should be deployed before the kafka service + kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" + + KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this may be replaced with a command that extracts the service name automatically + KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') + + # Kafka service should be deployed after the zookeeper service + sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + + # echo ">>> Deploying Apache Kafka Broker" + kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + + # echo ">>> Verifying Apache Kafka deployment" + sleep 5 + # KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods) + # if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then + # echo "Deployment Error: \n $KFK_PODS_STATUS" + # else + # echo "$KFK_PODS_STATUS" + # fi +} + +echo "Apache Kafka" +echo ">>> Checking if Apache Kafka is deployed ... " +if [ "$KFK_REDEPLOY" == "YES" ]; then + echo ">>> Redeploying kafka namespace" + kafka_deploy +elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then + echo ">>> Apache Kafka already present; skipping step." +else + echo ">>> Kafka namespace doesn't exist. Deploying kafka namespace" + kafka_deploy +fi +echo diff --git a/deploy/nats.sh b/deploy/nats.sh index 366270a6915a1eef969846446ecc9152c3fa9531..e9cef883ee7b909255d44551919771ebc49f524b 100755 --- a/deploy/nats.sh +++ b/deploy/nats.sh @@ -27,6 +27,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"} # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to. export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"} +# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'. +# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for +# development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS. +# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster +# with 3 replicas (set by default) will be deployed.
It is convenient for production and +# provides scalability features. +export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"} + # If not already set, disable flag for re-deploying NATS from scratch. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION! # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS. @@ -37,6 +45,14 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""} # Automated steps start here ######################################################################################################################## +# Constants +TMP_FOLDER="./tmp" +NATS_MANIFESTS_PATH="manifests/nats" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests" +mkdir -p $TMP_MANIFESTS_FOLDER + function nats_deploy_single() { echo "NATS Namespace" echo ">>> Create NATS Namespace (if missing)" @@ -47,18 +63,86 @@ function nats_deploy_single() { helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/ echo + echo "Install NATS (single-mode)" + echo ">>> Checking if NATS is deployed..." + if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then + echo ">>> NATS is present; skipping step." + else + echo ">>> Deploy NATS" + helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true + + + echo ">>> Waiting NATS statefulset to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do + printf "%c" "." + sleep 1 + done + + # Wait for statefulset condition "Available=True" does not work + # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error: + # "error: readyReplicas is not found" + # Workaround: Check the pods are ready + #echo ">>> NATS statefulset created. Waiting for readiness condition..." + #kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats + #kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \ + # statefulset/nats + echo ">>> NATS statefulset created. Waiting NATS pods to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do + printf "%c" "." 
+ sleep 1 + done + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0 + fi + echo + + echo "NATS Port Mapping" + echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})" + NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') + PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})" + NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}') + PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo +} + + +function nats_deploy_cluster() { + echo "NATS Namespace" + echo ">>> Create NATS Namespace (if missing)" + kubectl create namespace ${NATS_NAMESPACE} + echo + + echo "Add NATS Helm Chart" + helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/ + echo + echo "Upgrade NATS Helm Chart" helm3 repo update nats echo - echo "Install NATS (single-node)" + echo "Install NATS (cluster-mode)" echo ">>> Checking if NATS is deployed..." if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then echo ">>> NATS is present; skipping step." else echo ">>> Deploy NATS" - helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine - + cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml" + helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml" + echo ">>> Waiting NATS statefulset to be created..." while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do printf "%c" "." @@ -78,7 +162,17 @@ function nats_deploy_single() { printf "%c" "." sleep 1 done + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do + printf "%c" "." + sleep 1 + done + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do + printf "%c" "." 
+ sleep 1 + done kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0 + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1 + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2 fi echo @@ -110,7 +204,7 @@ function nats_deploy_single() { echo } -function nats_undeploy_single() { +function nats_undeploy() { echo "NATS" echo ">>> Checking if NATS is deployed..." if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then @@ -128,7 +222,13 @@ } if [ "$NATS_REDEPLOY" == "YES" ]; then - nats_undeploy_single + nats_undeploy fi -nats_deploy_single +if [ "$NATS_DEPLOY_MODE" == "single" ]; then + nats_deploy_single +elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then + nats_deploy_cluster +else + echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE" +fi \ No newline at end of file diff --git a/deploy/qdb.sh b/deploy/qdb.sh index acbcfd4f96ccbd2b09d5d82f66a1bf801a710780..ebb75dce9ad3007145a5129df3a4037a9392e875 100755 --- a/deploy/qdb.sh +++ b/deploy/qdb.sh @@ -44,7 +44,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"} # If not already set, disable flag for dropping tables if they exist. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION! -# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variables +# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables # QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped # while checking/deploying QuestDB. export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""} diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 3fdbe77fb502c42aaf7dd507ab239f6b3bb20056..189ae11e16e77196d6728482b7f16443149b60a9 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy. # By default, only basic components are deployed -export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"} +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"} # If not already set, set the tag you want to use for your images. export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} @@ -115,6 +115,17 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"} export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} +# ----- Apache Kafka ------------------------------------------------------ + +# If not already set, set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} + +# If not already set, set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"} + +# If not already set, if flag is YES, Apache Kafka will be redeployed and all topics will be lost.
+export KFK_REDEPLOY=${KFK_REDEPLOY:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -137,15 +148,59 @@ printf "\n" echo "Create secret with CockroachDB data" CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +CRDB_DATABASE_CONTEXT=${CRDB_DATABASE} # TODO: replace with a specific configurable environment variable kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ - --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE_CONTEXT} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with CockroachDB data for KPI Management microservices" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt" # TODO: replace with a specific configurable environment variable +kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE_KPI_MGMT} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with CockroachDB data for Telemetry microservices" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +CRDB_DATABASE_TELEMETRY="tfs_telemetry" # TODO: replace with a specific configurable environment variable +kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \ --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ --from-literal=CRDB_SSLMODE=require printf "\n" +echo "Create secret with CockroachDB data for Analytics microservices" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +CRDB_DATABASE_ANALYTICS="tfs_analytics" # TODO: replace with a specific configurable environment variable +kubectl create secret generic crdb-analytics --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE_ANALYTICS} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with Apache Kafka data for KPI, Telemetry and Analytics microservices" +KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}') +kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \ + --from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT} +printf "\n" + echo "Create secret with NATS data" NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') if [ -z "$NATS_CLIENT_PORT" ]; then @@ -174,6 +229,21 @@ kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type=' --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD} printf "\n" +# Check if "dlt" is in the list of components +if [[ " ${TFS_COMPONENTS[@]} " =~ " dlt " ]]; then + echo "Create secret for HLF keystore" + kubectl create secret generic dlt-keystone --namespace ${TFS_K8S_NAMESPACE} --from-file=keystore=${KEY_DIRECTORY_PATH} + printf "\n" + + echo "Create secret for HLF signcerts" + kubectl create secret generic dlt-signcerts --namespace ${TFS_K8S_NAMESPACE} --from-file=signcerts.pem=${CERT_DIRECTORY_PATH} + printf "\n" + + echo "Create secret for HLF ca.crt" + kubectl create secret generic dlt-ca-crt --namespace ${TFS_K8S_NAMESPACE} --from-file=ca.crt=${TLS_CERT_PATH} + printf "\n" +fi + echo "Deploying components and collecting environment variables..." ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT @@ -204,6 +274,14 @@ if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then DOCKER_BUILD="docker buildx build" fi +LINKERD_STATUS="$(microk8s status -a linkerd)" +if [[ $LINKERD_STATUS =~ "enabled" ]]; then + echo "LinkerD installed: workloads will be injected" +else + echo "LinkerD not installed" +fi +printf "\n" + for COMPONENT in $TFS_COMPONENTS; do echo "Processing '$COMPONENT' component..." @@ -213,15 +291,17 @@ for COMPONENT in $TFS_COMPONENTS; do if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" - elif [ "$COMPONENT" == "pathcomp" ]; then + elif [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" - # next command is redundant, but helpful to keep cache updated between rebuilds - IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" - $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + if [ "$COMPONENT" == "pathcomp" ]; then + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + fi elif [ "$COMPONENT" == "dlt" ]; then BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" $DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" @@ -234,7 +314,7 @@ for COMPONENT in $TFS_COMPONENTS; do echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
- if [ "$COMPONENT" == "pathcomp" ]; then + if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ] ; then IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" @@ -279,10 +359,13 @@ for COMPONENT in $TFS_COMPONENTS; do echo " Adapting '$COMPONENT' manifest file..." MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml" - # cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" - cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST" + if [[ $linkerd_status =~ "enabled" ]]; then + cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST" + else + cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" + fi - if [ "$COMPONENT" == "pathcomp" ]; then + if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" @@ -316,7 +399,7 @@ for COMPONENT in $TFS_COMPONENTS; do echo " Deploying '$COMPONENT' component to Kubernetes..." DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log" kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG" - COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") + COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/g") #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" @@ -367,7 +450,7 @@ printf "\n" for COMPONENT in $TFS_COMPONENTS; do echo "Waiting for '$COMPONENT' component..." - COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") + COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/g") kubectl wait --namespace $TFS_K8S_NAMESPACE \ --for='condition=available' --timeout=90s deployment/${COMPONENT_OBJNAME}service WAIT_EXIT_CODE=$? diff --git a/hackfest/containerlab/README.md b/hackfest/containerlab/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e08e944e991d8cb8599e89fab63f4b8b664ed73c --- /dev/null +++ b/hackfest/containerlab/README.md @@ -0,0 +1,35 @@ +# ContainerLab + +The setup consists of a management network for configuring and managing nodes. +srl1 and srl2 are interconnected. +client1 is connected to srl1 and client2 to srl2. +Routing between client1 and client2 is set up via the Nokia SR Linux nodes. + +## Management Network +Name: mgmt-net +Subnet: 172.100.100.0/24 + +## Node Kinds +Nokia SR Linux: Image ghcr.io/nokia/srlinux:23.10.3 +Linux: Image ghcr.io/hellt/network-multitool + +## Nodes + +### Nokia SR Linux +- Type: ixr6 +- CPU: 0.5 +- Memory: 2GB +- Management IP: 172.100.100.101 + +The provided SR Linux CLI commands in the _srl.cli_ enables system management and configures the GNMI server with OpenConfig models. 
+ +### Linux + +Assigns IP 172.16.1.10/24 to eth1 and adds a route to 172.16.2.0/24 via 172.16.1.1. + +In this topology file, the clients come pre-configured with their respective IP addresses on their interfaces and routes in their routing tables. + +### Links +- Connect srl1:e1-1 to srl2:e1-1 +- Connect client1:eth1 to srl1:e1-2 +- Connect client2:eth1 to srl2:e1-2 \ No newline at end of file diff --git a/hackfest/containerlab/commands.txt b/hackfest/containerlab/commands.txt index df5fbc0ce0163f4ce06b862e90e29854dbae204a..ac91d4b08b913209151e4024eb04b31384ed641a 100644 --- a/hackfest/containerlab/commands.txt +++ b/hackfest/containerlab/commands.txt @@ -83,19 +83,19 @@ $ssh admin@clab-tfs-scenario-srl1 # Check configurations done: -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl1-nis.json -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl1-ifs.json -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl2-nis.json -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl2-ifs.json +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl1-nis.json +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl1-ifs.json +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl2-nis.json +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl2-ifs.json # Delete elements: -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' -gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' -gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1!
--skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' +gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' # Run gNMI Driver in standalone mode (advanced) PYTHONPATH=./src python -m src.device.tests.test_gnmi diff --git a/hackfest/containerlab/srl.cli b/hackfest/containerlab/srl.cli new file mode 100644 index 0000000000000000000000000000000000000000..7d4987e22795ddd4667e030abddbc11827b9dc66 --- /dev/null +++ b/hackfest/containerlab/srl.cli @@ -0,0 +1,2 @@ +set / system management openconfig admin-state enable +set / system gnmi-server network-instance mgmt yang-models openconfig diff --git a/hackfest/containerlab/tfs-scenario.clab.yml b/hackfest/containerlab/tfs-scenario.clab.yml index f79378757827ff706be849b03277b947ee85f7fb..c715a1a539b59d807cd13c03f75f7de2e7bae084 100644 --- a/hackfest/containerlab/tfs-scenario.clab.yml +++ b/hackfest/containerlab/tfs-scenario.clab.yml @@ -23,35 +23,41 @@ mgmt: topology: kinds: - srl: - image: ghcr.io/nokia/srlinux:23.3.1 + nokia_srlinux: + image: ghcr.io/nokia/srlinux:23.10.3 linux: image: ghcr.io/hellt/network-multitool nodes: srl1: - kind: srl + kind: nokia_srlinux type: ixr6 cpu: 0.5 - memory: 1GB + memory: 2GB mgmt-ipv4: 172.100.100.101 - #startup-config: srl1.cli + startup-config: srl.cli srl2: - kind: srl + kind: nokia_srlinux type: ixr6 cpu: 0.5 - memory: 1GB + memory: 2GB mgmt-ipv4: 172.100.100.102 - #startup-config: srl2.cli + startup-config: srl.cli client1: kind: linux cpu: 0.1 memory: 100MB mgmt-ipv4: 172.100.100.201 + exec: + - ip address add 172.16.1.10/24 dev eth1 + - ip route add 172.16.2.0/24 via 172.16.1.1 client2: kind: linux cpu: 0.1 memory: 100MB mgmt-ipv4: 172.100.100.202 + exec: + - ip address add 172.16.2.10/24 dev eth1 + - ip route add 172.16.1.0/24 via 172.16.2.1 links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] diff --git a/install_requirements.sh b/install_requirements.sh index cbd378eca81af17386100fc0ceb3757912d0ebf5..54b660a521dadc08a344d2f79f2db15271131a21 100755 --- a/install_requirements.sh +++ b/install_requirements.sh @@ -22,6 +22,7 @@ ALL_COMPONENTS="context device service nbi monitoring webui interdomain slice" ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector" ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector" +ALL_COMPONENTS="${ALL_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api" TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS} # Some components require libyang built from source code diff --git a/manifests/analyticsservice.yaml b/manifests/analyticsservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0fa3ed0be6eda8cf944e199543e3c2cd59cc98d6 --- /dev/null +++ b/manifests/analyticsservice.yaml @@ -0,0 +1,128 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: analyticsservice +spec: + selector: + matchLabels: + app: analyticsservice + #replicas: 1 + template: + metadata: + labels: + app: analyticsservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: frontend + image: labs.etsi.org:5050/tfs/controller/analytics-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 30080 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: crdb-analytics + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30080"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30080"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: backend + image: labs.etsi.org:5050/tfs/controller/analytics-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 30090 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30090"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30090"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: analyticsservice + labels: + app: analyticsservice +spec: + type: ClusterIP + selector: + app: analyticsservice + ports: + - name: frontend-grpc + protocol: TCP + port: 30080 + targetPort: 30080 + - name: backend-grpc + protocol: TCP + port: 30090 + targetPort: 30090 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: analyticsservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: analyticsservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml index 4d9ef0f844b5ffb02753b6cc7a7be7d03928896c..bcb0c704948ecdbd8b271b68e685c481e669594b 100644 --- a/manifests/cockroachdb/cluster.yaml +++ b/manifests/cockroachdb/cluster.yaml @@ -39,8 +39,8 @@ spec: cpu: 8 memory: 8Gi tlsEnabled: true -# You can set either a version of the db or a specific image name -# cockroachDBVersion: v22.2.8 + # You can set either a version of the db or a specific image name + # cockroachDBVersion: v22.2.8 image: name: cockroachdb/cockroach:v22.2.8 # nodes refers to the number of crdb pods that are created @@ -49,21 +49,16 @@ spec: additionalLabels: crdb: is-cool # affinity is a new API field that is behind a feature gate that is - # disabled by default. To enable please see the operator.yaml file. + # disabled by default. To enable please see the operator.yaml file. # The affinity field will accept any podSpec affinity rule. 
- # affinity: - # podAntiAffinity: - # preferredDuringSchedulingIgnoredDuringExecution: - # - weight: 100 - # podAffinityTerm: - # labelSelector: - # matchExpressions: - # - key: app.kubernetes.io/instance - # operator: In - # values: - # - cockroachdb - # topologyKey: kubernetes.io/hostname + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/instance: cockroachdb # nodeSelectors used to match against # nodeSelector: diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml index 59d515061c4c0f253523aab803653b3f33007461..d8e691308e4cc16af3f545d87244281ab0730696 100644 --- a/manifests/cockroachdb/operator.yaml +++ b/manifests/cockroachdb/operator.yaml @@ -381,6 +381,7 @@ spec: spec: containers: - args: + - -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true - -zap-log-level - info env: diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index 34f0d53c3116183696b6f7f49682596092f313ae..66bd3724c7a6244ea553d7199968c900bce0611c 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -12,6 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +apiVersion: v1 +kind: ConfigMap +metadata: + name: dlt-config +data: + CHANNEL_NAME: "tfs_channel" # Change according to your blockchain configuration + CHAINCODE_NAME: "tfs_dlt" # Change according to your blockchain configuration + MSP_ID: "ETSI" # Change according to your blockchain configuration + PEER_ENDPOINT: "127.0.0.1:7051" # Change according to your blockchain configuration + PEER_HOST_ALIAS: "peer0.org1.tfs.etsi.org" # Change according to your blockchain configuration + KEY_DIRECTORY_PATH: "/etc/hyperledger/fabric-keystore/keystore" + CERT_DIRECTORY_PATH: "/etc/hyperledger/fabric-signcerts/signcerts.pem" + TLS_CERT_PATH: "/etc/hyperledger/fabric-ca-crt/ca.crt" + +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -78,6 +93,52 @@ spec: limits: cpu: 700m memory: 1024Mi + volumeMounts: + - name: keystore + mountPath: /etc/hyperledger/fabric-keystore + readOnly: true + - name: signcerts + mountPath: /etc/hyperledger/fabric-signcerts + readOnly: true + - name: ca-crt + mountPath: /etc/hyperledger/fabric-ca-crt + readOnly: true + envFrom: + - configMapRef: + name: dlt-config + env: + - name: KEY_DIRECTORY_PATH + value: "/etc/hyperledger/fabric-keystore/keystore" + - name: CERT_DIRECTORY_PATH + value: "/etc/hyperledger/fabric-signcerts/signcerts.pem" + - name: TLS_CERT_PATH + value: "/etc/hyperledger/fabric-ca-crt/ca.crt" + volumes: + - name: keystore + secret: + secretName: dlt-keystone + - name: signcerts + secret: + secretName: dlt-signcerts + - name: ca-crt + secret: + secretName: dlt-ca-crt + +--- +apiVersion: v1 +kind: Service +metadata: + name: gatewayservice +spec: + selector: + app: dltservice + ports: + - protocol: TCP + port: 50051 + targetPort: 50051 + nodePort: 32001 + type: NodePort + --- apiVersion: v1 kind: Service diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index 9be6032cfbb59cb580219ca71451be24dac93205..8926dcdafdea90ad7dea41eca854cbcb30853553 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -38,6 +38,8 @@ spec: value: "INFO" - name: TOPOLOGY_ABSTRACTOR value: "DISABLE" + - name: DLT_INTEGRATION + value: "DISABLE" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10010"] diff 
--git a/manifests/kafka/01-zookeeper.yaml b/manifests/kafka/01-zookeeper.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c0e87ae0c6f12ed56702220f9e15fbe90b3b9c31 --- /dev/null +++ b/manifests/kafka/01-zookeeper.yaml @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + labels: + app: zookeeper-service + name: zookeeper-service + namespace: kafka +spec: + type: NodePort + ports: + - name: zookeeper-port + port: 2181 + nodePort: 30181 + targetPort: 2181 + selector: + app: zookeeper +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: zookeeper + name: zookeeper + namespace: kafka +spec: + replicas: 1 + selector: + matchLabels: + app: zookeeper + template: + metadata: + labels: + app: zookeeper + spec: + containers: + - image: wurstmeister/zookeeper + imagePullPolicy: IfNotPresent + name: zookeeper + ports: + - containerPort: 2181 \ No newline at end of file diff --git a/manifests/kafka/02-kafka.yaml b/manifests/kafka/02-kafka.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8400f5944193458ccdad8be5dbc189f8f40cdd7b --- /dev/null +++ b/manifests/kafka/02-kafka.yaml @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kafka-broker + name: kafka-service + namespace: kafka +spec: + ports: + - port: 9092 + selector: + app: kafka-broker +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kafka-broker + name: kafka-broker + namespace: kafka +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-broker + template: + metadata: + labels: + app: kafka-broker + spec: + hostname: kafka-broker + containers: + - env: + - name: KAFKA_BROKER_ID + value: "1" + - name: KAFKA_ZOOKEEPER_CONNECT + value: <ZOOKEEPER_INTERNAL_IP>:2181 + - name: KAFKA_LISTENERS + value: PLAINTEXT://:9092 + - name: KAFKA_ADVERTISED_LISTENERS + value: PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092 + image: wurstmeister/kafka + imagePullPolicy: IfNotPresent + name: kafka-broker + ports: + - containerPort: 9092 diff --git a/manifests/kpi_managerservice.yaml b/manifests/kpi_managerservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..984d783a9de7ed3c0c02e87d82ec673dc19c9508 --- /dev/null +++ b/manifests/kpi_managerservice.yaml @@ -0,0 +1,99 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kpi-managerservice +spec: + selector: + matchLabels: + app: kpi-managerservice + #replicas: 1 + template: + metadata: + annotations: + config.linkerd.io/skip-outbound-ports: "4222" + labels: + app: kpi-managerservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: server + image: labs.etsi.org:5050/tfs/controller/kpi_manager:latest + imagePullPolicy: Always + ports: + - containerPort: 30010 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: crdb-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30010"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30010"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: kpi-managerservice + labels: + app: kpi-managerservice +spec: + type: ClusterIP + selector: + app: kpi-managerservice + ports: + - name: grpc + protocol: TCP + port: 30010 + targetPort: 30010 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: kpi-managerservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: kpi-managerservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/manifests/kpi_value_apiservice.yaml b/manifests/kpi_value_apiservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4dcb00545ffaa33de39fd29c029780b777ea91f --- /dev/null +++ 
b/manifests/kpi_value_apiservice.yaml @@ -0,0 +1,99 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kpi-value-apiservice +spec: + selector: + matchLabels: + app: kpi-value-apiservice + #replicas: 1 + template: + metadata: + annotations: + config.linkerd.io/skip-outbound-ports: "4222" + labels: + app: kpi-value-apiservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: server + image: labs.etsi.org:5050/tfs/controller/kpi_value_api:latest + imagePullPolicy: Always + ports: + - containerPort: 30020 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30020"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30020"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: kpi-value-apiservice + labels: + app: kpi-value-apiservice +spec: + type: ClusterIP + selector: + app: kpi-value-apiservice + ports: + - name: grpc + protocol: TCP + port: 30020 + targetPort: 30020 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: kpi-value-apiservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: kpi-value-apiservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/manifests/kpi_value_writerservice.yaml b/manifests/kpi_value_writerservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e21e36f48ba08999f142e8548fed61cd2dfef0cc --- /dev/null +++ b/manifests/kpi_value_writerservice.yaml @@ -0,0 +1,99 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kpi-value-writerservice +spec: + selector: + matchLabels: + app: kpi-value-writerservice + #replicas: 1 + template: + metadata: + annotations: + config.linkerd.io/skip-outbound-ports: "4222" + labels: + app: kpi-value-writerservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: server + image: labs.etsi.org:5050/tfs/controller/kpi_value_writer:latest + imagePullPolicy: Always + ports: + - containerPort: 30030 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30030"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30030"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: kpi-value-writerservice + labels: + app: kpi-value-writerservice +spec: + type: ClusterIP + selector: + app: kpi-value-writerservice + ports: + - name: grpc + protocol: TCP + port: 30030 + targetPort: 30030 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: kpi-value-writerservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: kpi-value-writerservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml new file mode 100644 index 0000000000000000000000000000000000000000..00dbef17fca74ca906d4f97ee6e8751c03ef493f --- /dev/null +++ b/manifests/nats/cluster.yaml @@ -0,0 +1,47 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +container: + image: + tags: 2.9-alpine + env: + # different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB + # should be ~90% of memory limit + GOMEMLIMIT: 400MiB + merge: + # recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters + resources: + requests: + cpu: 1 + memory: 500Mi + limits: + cpu: 1 + memory: 1Gi + +config: + cluster: + enabled: true + replicas: 3 + jetstream: + enabled: true + fileStore: + pvc: + size: 4Gi + +# Force one pod per node, if possible +podTemplate: + topologySpreadConstraints: + kubernetes.io/hostname: + maxSkew: 1 + whenUnsatisfiable: ScheduleAnyway diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index 0892f0c9b790b936df5540ac5fe1aed0270b91a5..955d5726a9f8f79560327a8f595c1865f6d37d22 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -18,6 +18,11 @@ metadata: name: tfs-ingress annotations: nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/limit-rps: "50" + nginx.ingress.kubernetes.io/limit-connections: "50" + nginx.ingress.kubernetes.io/proxy-connect-timeout: "50" + nginx.ingress.kubernetes.io/proxy-send-timeout: "50" + nginx.ingress.kubernetes.io/proxy-read-timeout: "50" spec: rules: - http: diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f9917499a425b95d436ffa8cdb311d29483d2ca --- /dev/null +++ b/manifests/telemetryservice.yaml @@ -0,0 +1,128 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
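The NATS values above hand Go a GOMEMLIMIT in Go's own unit suffixes (B, KiB, MiB, GiB, TiB) rather than Kubernetes units, with the comment recommending roughly 90% of the memory limit. A small illustrative helper for deriving such a value from a Kubernetes quantity; the function name and the 90% default are this sketch's assumptions:

K8S_TO_GO_UNITS = {'Ki': 'KiB', 'Mi': 'MiB', 'Gi': 'GiB', 'Ti': 'TiB'}

def gomemlimit_from_k8s(limit: str, fraction: float = 0.9) -> str:
    # e.g. '500Mi' -> '450MiB': keep headroom below the container memory limit
    for k8s_suffix, go_suffix in K8S_TO_GO_UNITS.items():
        if limit.endswith(k8s_suffix):
            value = float(limit[:-len(k8s_suffix)])
            return '{:d}{:s}'.format(int(value * fraction), go_suffix)
    raise ValueError('unsupported unit in {!r}'.format(limit))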
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: telemetryservice +spec: + selector: + matchLabels: + app: telemetryservice + #replicas: 1 + template: + metadata: + labels: + app: telemetryservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: frontend + image: labs.etsi.org:5050/tfs/controller/telemetry-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 30050 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: crdb-telemetry + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30050"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30050"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: backend + image: labs.etsi.org:5050/tfs/controller/telemetry-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 30060 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30060"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30060"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: telemetryservice + labels: + app: telemetryservice +spec: + type: ClusterIP + selector: + app: telemetryservice + ports: + - name: frontend-grpc + protocol: TCP + port: 30050 + targetPort: 30050 + - name: backend-grpc + protocol: TCP + port: 30060 + targetPort: 30060 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: telemetryservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: telemetryservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index a519aa4a2f8a1e81f1b7f2a1be1965ec0b8bb386..19317323f2a60293a33d740b28b3795627846642 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -117,3 +117,25 @@ spec: - name: grafana port: 3000 targetPort: 3000 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: webuiservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: webuiservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/my_deploy.sh b/my_deploy.sh index 8417f6eae510391e65d5f91202e59cccf32e1f98..88be82b63e9e79a97ee79702de886f69a6152f94 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -22,9 +22,12 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. 
export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator" -# Uncomment to activate Monitoring +# Uncomment to activate Monitoring (old) #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api" + # Uncomment to activate BGP-LS Speaker #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" @@ -59,6 +62,18 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_gene # Uncomment to activate E2E Orchestrator #export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +#export TFS_COMPONENTS="${TFS_COMPONENTS} app" + + # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" @@ -69,7 +84,7 @@ export TFS_K8S_NAMESPACE="tfs" export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" # Uncomment to monitor performance of components -export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" # Uncomment when deploying Optical CyberSecurity #export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" @@ -123,6 +138,10 @@ export NATS_EXT_PORT_CLIENT="4222" # Set the external port NATS HTTP Mgmt GUI interface will be exposed to. export NATS_EXT_PORT_HTTP="8222" +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + # Disable flag for re-deploying NATS from scratch. export NATS_REDEPLOY="" @@ -167,3 +186,15 @@ export PROM_EXT_PORT_HTTP="9090" # Set the external port Grafana HTTP Dashboards will be exposed to. export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT="9092" + +# Set the flag to YES for redeploying of Apache Kafka +export KFK_REDEPLOY="" diff --git a/proto/acl.proto b/proto/acl.proto index d777768819c4cc0ca03614b6928d9c2d9511b449..b45d46226d2706396f6d4c0e73ce72e15a75f2d5 100644 --- a/proto/acl.proto +++ b/proto/acl.proto @@ -46,6 +46,7 @@ message AclMatch { uint32 dst_port = 6; uint32 start_mpls_label = 7; uint32 end_mpls_label = 8; + string tcp_flags = 9; } message AclAction { diff --git a/proto/analytics_frontend.proto b/proto/analytics_frontend.proto new file mode 100644 index 0000000000000000000000000000000000000000..ace0581db816bee1d0d20746f2b864dce602567b --- /dev/null +++ b/proto/analytics_frontend.proto @@ -0,0 +1,73 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package analytics_frontend;
+
+import "context.proto";
+import "kpi_manager.proto";
+//import "kpi_sample_types.proto";
+
+service AnalyticsFrontendService {
+  rpc StartAnalyzer  (Analyzer      ) returns (AnalyzerId   ) {}
+  rpc StopAnalyzer   (AnalyzerId    ) returns (context.Empty) {}
+  rpc SelectAnalyzers(AnalyzerFilter) returns (AnalyzerList ) {}
+}
+
+message AnalyzerId {
+  context.Uuid analyzer_id = 1;
+}
+
+enum AnalyzerOperationMode {
+  ANALYZEROPERATIONMODE_UNSPECIFIED = 0;
+  ANALYZEROPERATIONMODE_BATCH       = 1;
+  ANALYZEROPERATIONMODE_STREAMING   = 2;
+}
+
+// duration field may be added in analyzer...
+message Analyzer {
+  AnalyzerId analyzer_id = 1;
+  string algorithm_name = 2;                 // The algorithm to be executed
+  float duration_s = 3;                      // Terminate the data analytics thread after this duration (seconds); 0 = run indefinitely
+  repeated kpi_manager.KpiId input_kpi_ids = 4;  // The KPI Ids to be processed by the analyzer
+  repeated kpi_manager.KpiId output_kpi_ids = 5; // The KPI Ids produced by the analyzer
+  AnalyzerOperationMode operation_mode = 6;  // Operation mode of the analyzer
+  map<string, string> parameters = 7;        // Dictionary of (key, value) pairs, such as (window_size, 10), etc.
+  // In batch mode...
+  float batch_min_duration_s = 8;            // ..., min duration to collect before executing batch
+  float batch_max_duration_s = 9;            // ..., max duration collected to execute the batch
+  uint64 batch_min_size = 10;                // ..., min number of samples to collect before executing batch
+  uint64 batch_max_size = 11;                // ..., max number of samples collected to execute the batch
+}
+
+message AnalyzerFilter {
+  // Analyzers that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+ // All fields empty means: list all Analyzers + repeated AnalyzerId analyzer_id = 1; + repeated string algorithm_names = 2; + repeated kpi_manager.KpiId input_kpi_ids = 3; + repeated kpi_manager.KpiId output_kpi_ids = 4; + //repeated kpi_sample_types.KpiSampleType kpi_sample_type = 5; // Not implemented + //repeated context.DeviceId device_id = 6; // Not implemented + //repeated context.EndPointId endpoint_id = 7; // Not implemented + //repeated context.ServiceId service_id = 8; // Not implemented + //repeated context.SliceId slice_id = 9; // Not implemented + //repeated context.ConnectionId connection_id = 10; // Not implemented + //repeated context.LinkId link_id = 11; // Not implemented +} + +message AnalyzerList { + repeated Analyzer analyzer_list = 1; +} diff --git a/proto/device.proto b/proto/device.proto index 3d7ba14bb75e226c51d8d2462fca76a1cab86554..a1882f33f8e177502c456672a0517928f0259ef5 100644 --- a/proto/device.proto +++ b/proto/device.proto @@ -16,7 +16,7 @@ syntax = "proto3"; package device; import "context.proto"; -import "monitoring.proto"; +import "monitoring.proto"; // to be migrated to: "kpi_manager.proto" service DeviceService { rpc AddDevice (context.Device ) returns (context.DeviceId ) {} @@ -27,8 +27,8 @@ service DeviceService { } message MonitoringSettings { - monitoring.KpiId kpi_id = 1; - monitoring.KpiDescriptor kpi_descriptor = 2; - float sampling_duration_s = 3; - float sampling_interval_s = 4; + monitoring.KpiId kpi_id = 1; // to be migrated to: "kpi_manager.KpiId" + monitoring.KpiDescriptor kpi_descriptor = 2; // to be migrated to: "kpi_manager.KpiDescriptor" + float sampling_duration_s = 3; + float sampling_interval_s = 4; } diff --git a/proto/kpi_manager.proto b/proto/kpi_manager.proto new file mode 100644 index 0000000000000000000000000000000000000000..2640b58c60f004e51c8aeacc0ed76963f0436956 --- /dev/null +++ b/proto/kpi_manager.proto @@ -0,0 +1,60 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package kpi_manager; + +import "context.proto"; +import "kpi_sample_types.proto"; + +service KpiManagerService { + rpc SetKpiDescriptor (KpiDescriptor ) returns (KpiId ) {} + rpc DeleteKpiDescriptor (KpiId ) returns (context.Empty ) {} + rpc GetKpiDescriptor (KpiId ) returns (KpiDescriptor ) {} + rpc SelectKpiDescriptor (KpiDescriptorFilter) returns (KpiDescriptorList ) {} +} + +message KpiId { + context.Uuid kpi_id = 1; +} + +message KpiDescriptor { + KpiId kpi_id = 1; + string kpi_description = 2; + kpi_sample_types.KpiSampleType kpi_sample_type = 3; + context.DeviceId device_id = 4; + context.EndPointId endpoint_id = 5; + context.ServiceId service_id = 6; + context.SliceId slice_id = 7; + context.ConnectionId connection_id = 8; + context.LinkId link_id = 9; +} + +message KpiDescriptorFilter { + // KPI Descriptors that fulfill the filter are those that match ALL the following fields. + // An empty list means: any value is accepted. 
+ // All fields empty means: list all KPI Descriptors + repeated KpiId kpi_id = 1; + repeated kpi_sample_types.KpiSampleType kpi_sample_type = 2; + repeated context.DeviceId device_id = 3; + repeated context.EndPointId endpoint_id = 4; + repeated context.ServiceId service_id = 5; + repeated context.SliceId slice_id = 6; + repeated context.ConnectionId connection_id = 7; + repeated context.LinkId link_id = 8; +} + +message KpiDescriptorList { + repeated KpiDescriptor kpi_descriptor_list = 1; +} diff --git a/proto/kpi_value_api.proto b/proto/kpi_value_api.proto new file mode 100644 index 0000000000000000000000000000000000000000..dff96272e3d05756dd19a49ecaede7311b196540 --- /dev/null +++ b/proto/kpi_value_api.proto @@ -0,0 +1,52 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package kpi_value_api; + +import "context.proto"; +import "kpi_manager.proto"; + +service KpiValueAPIService { + rpc StoreKpiValues (KpiValueList) returns (context.Empty) {} + rpc SelectKpiValues (KpiValueFilter) returns (KpiValueList) {} +} + +message KpiValue { + kpi_manager.KpiId kpi_id = 1; + context.Timestamp timestamp = 2; + KpiValueType kpi_value_type = 3; +} + +message KpiValueList { + repeated KpiValue kpi_value_list = 1; +} + +message KpiValueType { + oneof value { + int32 int32Val = 1; + uint32 uint32Val = 2; + int64 int64Val = 3; + uint64 uint64Val = 4; + float floatVal = 5; + string stringVal = 6; + bool boolVal = 7; + } +} + +message KpiValueFilter { + repeated kpi_manager.KpiId kpi_id = 1; + repeated context.Timestamp start_timestamp = 2; + repeated context.Timestamp end_timestamp = 3; +} diff --git a/proto/monitoring.proto b/proto/monitoring.proto old mode 100644 new mode 100755 index 2c1c2f8ad58192586c17e310e33bccebbe775ee8..083bd82854547478d3a8f4a8935fdf75e9070d9d --- a/proto/monitoring.proto +++ b/proto/monitoring.proto @@ -145,12 +145,12 @@ message SubsList { } message AlarmDescriptor { - AlarmID alarm_id = 1; - string alarm_description = 2; - string name = 3; - KpiId kpi_id = 4; - KpiValueRange kpi_value_range = 5; - context.Timestamp timestamp = 6; + AlarmID alarm_id = 1; + string alarm_description = 2; + string name = 3; + KpiId kpi_id = 4; + KpiValueRange kpi_value_range = 5; + context.Timestamp timestamp = 6; } message AlarmID{ @@ -170,5 +170,5 @@ message AlarmResponse { } message AlarmList { - repeated AlarmDescriptor alarm_descriptor = 1; + repeated AlarmDescriptor alarm_descriptor = 1; } diff --git a/proto/optical_attack_detector.proto b/proto/optical_attack_detector.proto index 783e23b35d754db983c75c56dadc203996beadd4..f74eea68b8c5a588f5ecc06a59916058cb8d9695 100644 --- a/proto/optical_attack_detector.proto +++ b/proto/optical_attack_detector.proto @@ -12,12 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
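The filter messages introduced across these protos (KpiDescriptorFilter above, AnalyzerFilter and CollectorFilter elsewhere in this change) all share one convention: a candidate matches when it satisfies ALL non-empty fields, and an empty repeated field acts as a wildcard. A sketch of that predicate, with hypothetical accessors, to make the AND/any-of semantics concrete:

def matches(descriptor, filter_fields: dict) -> bool:
    # filter_fields maps a field name to the list of accepted values;
    # an empty list means 'any value is accepted' (wildcard), and a
    # descriptor must match ALL constrained fields (logical AND).
    for field_name, accepted_values in filter_fields.items():
        if accepted_values and getattr(descriptor, field_name) not in accepted_values:
            return False
    return True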
-// protocol buffers documentation: https://developers.google.com/protocol-buffers/docs/proto3
 syntax = "proto3";
 package optical_attack_detector;

 import "context.proto";
-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"

 service OpticalAttackDetectorService {
@@ -28,5 +27,5 @@
 message DetectionRequest {
 context.ServiceId service_id = 1;
- monitoring.KpiId kpi_id = 2;
+ monitoring.KpiId kpi_id = 2; // to be migrated to: "kpi_manager.KpiId"
 }
diff --git a/proto/policy_condition.proto b/proto/policy_condition.proto
index add3ec1ab127674e171c366ffa49346892b3ff0d..612dcb1af8eb8adb0db65b8ae47301c87ad6b9ef 100644
--- a/proto/policy_condition.proto
+++ b/proto/policy_condition.proto
@@ -15,13 +15,13 @@
 syntax = "proto3";
 package policy;

-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"

 // Condition
 message PolicyRuleCondition {
- monitoring.KpiId kpiId = 1;
- NumericalOperator numericalOperator = 2;
- monitoring.KpiValue kpiValue = 3;
+ monitoring.KpiId kpiId = 1; // to be migrated to: "kpi_manager.KpiId"
+ NumericalOperator numericalOperator = 2;
+ monitoring.KpiValue kpiValue = 3;
 }

 // Operator to be used when comparing Kpis with condition values
diff --git a/proto/telemetry_frontend.proto b/proto/telemetry_frontend.proto
new file mode 100644
index 0000000000000000000000000000000000000000..614d10cf06cdbb1ff4fba6e51a39286eb5132688
--- /dev/null
+++ b/proto/telemetry_frontend.proto
@@ -0,0 +1,50 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package telemetry_frontend;
+
+import "context.proto";
+import "kpi_manager.proto";
+
+service TelemetryFrontendService {
+  rpc StartCollector   (Collector      ) returns (CollectorId  ) {}
+  rpc StopCollector    (CollectorId    ) returns (context.Empty) {}
+  rpc SelectCollectors (CollectorFilter) returns (CollectorList) {}
+}
+
+message CollectorId {
+  context.Uuid collector_id = 1;
+}
+
+message Collector {
+  CollectorId collector_id = 1;        // The Collector ID
+  kpi_manager.KpiId kpi_id = 2;        // The KPI Id to be associated to the collected samples
+  float duration_s = 3;                // Terminate data collection after duration [seconds]; duration==0 means indefinitely
+  float interval_s = 4;                // Interval between collected samples
+  context.Timestamp start_time = 5;    // Timestamp when the Collector starts execution
+  context.Timestamp end_time = 6;      // Timestamp when the Collector stops execution
+}
+
+message CollectorFilter {
+  // Collectors that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+ // All fields empty means: list all Collectors + repeated CollectorId collector_id = 1; + repeated kpi_manager.KpiId kpi_id = 2; +} + +message CollectorList { + repeated Collector collector_list = 1; +} diff --git a/scripts/run_tests_locally-analytics-DB.sh b/scripts/run_tests_locally-analytics-DB.sh new file mode 100755 index 0000000000000000000000000000000000000000..9df5068d6bde361a4a1e73b96990c0d407c88cb4 --- /dev/null +++ b/scripts/run_tests_locally-analytics-DB.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + analytics/tests/test_analytics_db.py diff --git a/scripts/run_tests_locally-analytics-frontend.sh b/scripts/run_tests_locally-analytics-frontend.sh new file mode 100755 index 0000000000000000000000000000000000000000..e30d30da623b2d0eee3d925d69a846b4b1f516a3 --- /dev/null +++ b/scripts/run_tests_locally-analytics-frontend.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + analytics/frontend/tests/test_frontend.py diff --git a/scripts/run_tests_locally-device-openconfig-ocnos.sh b/scripts/run_tests_locally-device-openconfig-ocnos.sh new file mode 100755 index 0000000000000000000000000000000000000000..60af6768d37199c957d17c6804c8af1072d0b0e1 --- /dev/null +++ b/scripts/run_tests_locally-device-openconfig-ocnos.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze coverage of code at the same time
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO -o log_cli=true --verbose \
+    device/tests/test_unitary_openconfig_ocnos.py
diff --git a/scripts/run_tests_locally-kpi-DB.sh b/scripts/run_tests_locally-kpi-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4953b49e0a437becfda1648c722bcdcf92c58d93
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-DB.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_manager/tests/test_kpi_db.py
diff --git a/scripts/run_tests_locally-kpi-manager.sh b/scripts/run_tests_locally-kpi-manager.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8a4ce8d95c74657451147078a1d93e891dfc2ac8
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-manager.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
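The kpi-manager script that follows, like the kpi-DB one above, resolves the CockroachDB ClusterIP with kubectl and exports CRDB_URI before invoking pytest. The same steps can be reproduced from Python when running tests from an IDE; a sketch assuming kubectl is on PATH and the credentials these scripts use throughout:

import os, subprocess, sys
import pytest

def export_crdb_uri(namespace: str = 'crdb') -> str:
    # Same resolution the shell scripts do with kubectl + jsonpath
    cluster_ip = subprocess.check_output([
        'kubectl', '--namespace', namespace, 'get', 'service', 'cockroachdb-public',
        '-o', 'jsonpath={.spec.clusterIP}',
    ]).decode().strip()
    uri = 'cockroachdb://tfs:tfs123@{:s}:26257/tfs_kpi_mgmt?sslmode=require'.format(cluster_ip)
    os.environ['CRDB_URI'] = uri
    return uri

if __name__ == '__main__':
    export_crdb_uri()
    # run from src/, mirroring the shell script's pytest invocation
    sys.exit(pytest.main(['--log-level=DEBUG', '--log-cli-level=DEBUG', '--verbose',
                          'kpi_manager/tests/test_kpi_db.py']))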
+ + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +# RCFILE=$PROJECTDIR/coverage/.coveragerc +# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ +# kpi_manager/tests/test_unitary.py + +# python3 kpi_manager/tests/test_unitary.py + +RCFILE=$PROJECTDIR/coverage/.coveragerc +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + kpi_manager/tests/test_kpi_manager.py diff --git a/scripts/run_tests_locally-kpi-prom-writer.sh b/scripts/run_tests_locally-kpi-prom-writer.sh new file mode 100755 index 0000000000000000000000000000000000000000..8865a8a34495a032525c7585a409f4c32c7249df --- /dev/null +++ b/scripts/run_tests_locally-kpi-prom-writer.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src + +RCFILE=$PROJECTDIR/coverage/.coveragerc +CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + kpi_value_writer/tests/test_metric_writer_to_prom.py diff --git a/scripts/run_tests_locally-kpi-value-API.sh b/scripts/run_tests_locally-kpi-value-API.sh new file mode 100755 index 0000000000000000000000000000000000000000..3953d2a89c6fbe2bd3546e648246b9b018e5fdb0 --- /dev/null +++ b/scripts/run_tests_locally-kpi-value-API.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
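The script body that follows resolves the Kafka broker address from the Docker bridge (docker inspect kafka) before running the KPI Value API tests. A quick reachability probe in the same spirit, using the confluent-kafka package already pinned by the analytics backend requirements; the broker address is a placeholder:

from confluent_kafka.admin import AdminClient

def kafka_reachable(bootstrap_servers: str, timeout_s: float = 5.0) -> bool:
    # list_topics() issues a metadata request and raises if no broker answers
    try:
        AdminClient({'bootstrap.servers': bootstrap_servers}).list_topics(timeout=timeout_s)
        return True
    except Exception:
        return False

print(kafka_reachable('127.0.0.1:9092'))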
+ + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src + +RCFILE=$PROJECTDIR/coverage/.coveragerc +KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") +KFK_SERVER_ADDRESS=${KAFKA_IP}:9092 +# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG -o log_cli=true --verbose \ + kpi_value_api/tests/test_kpi_value_api.py diff --git a/scripts/run_tests_locally-kpi-value-writer.sh b/scripts/run_tests_locally-kpi-value-writer.sh new file mode 100755 index 0000000000000000000000000000000000000000..8faaeb6d895a240278d7ceb0c5c0b2855fa25910 --- /dev/null +++ b/scripts/run_tests_locally-kpi-value-writer.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src + +RCFILE=$PROJECTDIR/coverage/.coveragerc +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + kpi_value_writer/tests/test_kpi_value_writer.py diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh new file mode 100755 index 0000000000000000000000000000000000000000..4b9a417603cc42a4e7e8b19c7394cc38633817fa --- /dev/null +++ b/scripts/run_tests_locally-telemetry-DB.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +# RCFILE=$PROJECTDIR/coverage/.coveragerc +# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ +# kpi_manager/tests/test_unitary.py + +RCFILE=$PROJECTDIR/coverage/.coveragerc +python3 -m pytest --log-level=DEBUG --log-cli-level=debug --verbose \ + telemetry/tests/test_telemetryDB.py diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh new file mode 100755 index 0000000000000000000000000000000000000000..79db05fcf1259365e8a909ee99395eb59dfb9437 --- /dev/null +++ b/scripts/run_tests_locally-telemetry-backend.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +# RCFILE=$PROJECTDIR/coverage/.coveragerc +# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ +# kpi_manager/tests/test_unitary.py + +# python3 kpi_manager/tests/test_unitary.py + +RCFILE=$PROJECTDIR/coverage/.coveragerc +python3 -m pytest --log-level=INFO --log-cli-level=debug --verbose \ + telemetry/backend/tests/test_TelemetryBackend.py diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh new file mode 100755 index 0000000000000000000000000000000000000000..a2a1de52340cac527d4d1c446c76740d38ce7783 --- /dev/null +++ b/scripts/run_tests_locally-telemetry-frontend.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +# RCFILE=$PROJECTDIR/coverage/.coveragerc +# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ +# kpi_manager/tests/test_unitary.py + +# python3 kpi_manager/tests/test_unitary.py + +RCFILE=$PROJECTDIR/coverage/.coveragerc +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + telemetry/frontend/tests/test_frontend.py diff --git a/scripts/show_logs_analytics_backend.sh b/scripts/show_logs_analytics_backend.sh new file mode 100755 index 0000000000000000000000000000000000000000..afb58567ca5ab250da48d2cfffa2c56abdff2db2 --- /dev/null +++ b/scripts/show_logs_analytics_backend.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. 
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/analyticsservice -c backend diff --git a/scripts/show_logs_analytics_frontend.sh b/scripts/show_logs_analytics_frontend.sh new file mode 100755 index 0000000000000000000000000000000000000000..6d3fae10b366f0082d3a393c224e8f1cb7830721 --- /dev/null +++ b/scripts/show_logs_analytics_frontend.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/analyticsservice -c frontend diff --git a/scripts/show_logs_kpi_manager.sh b/scripts/show_logs_kpi_manager.sh new file mode 100755 index 0000000000000000000000000000000000000000..86f084f69f6babf5a90957f432b214e35a08c461 --- /dev/null +++ b/scripts/show_logs_kpi_manager.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. 
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-managerservice -c server diff --git a/scripts/show_logs_kpi_value_api.sh b/scripts/show_logs_kpi_value_api.sh new file mode 100755 index 0000000000000000000000000000000000000000..041ad7f1ffb1a218af00d5d142024a5063d109c3 --- /dev/null +++ b/scripts/show_logs_kpi_value_api.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-value-apiservice -c server diff --git a/scripts/show_logs_kpi_value_writer.sh b/scripts/show_logs_kpi_value_writer.sh new file mode 100755 index 0000000000000000000000000000000000000000..d62f3ea0a1a6961be4a5b6f4841c9ba4e1a89316 --- /dev/null +++ b/scripts/show_logs_kpi_value_writer.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. 
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-value-writerservice -c server
diff --git a/scripts/show_logs_telemetry-DB.sh b/scripts/show_logs_telemetry-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..84fc875d01e18eae9b144edaf220d5cb74017ea4
--- /dev/null
+++ b/scripts/show_logs_telemetry-DB.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"crdb"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs cockroachdb-0
diff --git a/src/analytics/README.md b/src/analytics/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9663e5321ace6866491b90553553d9ccbf5793a1
--- /dev/null
+++ b/src/analytics/README.md
@@ -0,0 +1,4 @@
+# How to locally run and test the Analytics service (To be added soon)
+
+### Pre-requisites
+The following requirements should be fulfilled before executing the Analytics service.
diff --git a/src/analytics/__init__.py b/src/analytics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/analytics/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
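The backend Dockerfile below repeats a pattern used across the TFS component images: compile the .proto files with grpc_tools.protoc, then run a sed pass that rewrites the generated absolute imports into relative ones so the modules resolve inside the common.proto package. Illustratively, with a hypothetical generated line:

# As emitted by grpc_tools.protoc:
import kpi_manager_pb2 as kpi__manager__pb2
# After sed -E 's/(import\ .*)_pb2/from . \1_pb2/g':
from . import kpi_manager_pb2 as kpi__manager__pb2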
diff --git a/src/analytics/backend/Dockerfile b/src/analytics/backend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..17adcd3ab1df5704cc7ef0c5a19b3cfb1539ee22 --- /dev/null +++ b/src/analytics/backend/Dockerfile @@ -0,0 +1,69 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/analytics/backend +WORKDIR /var/teraflow/analytics/backend +COPY src/analytics/backend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/analytics/__init__.py analytics/__init__.py +COPY src/analytics/backend/. analytics/backend/ + +# Start the service +ENTRYPOINT ["python", "-m", "analytics.backend.service"] diff --git a/src/analytics/backend/__init__.py b/src/analytics/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/analytics/backend/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/analytics/backend/requirements.in b/src/analytics/backend/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..9df678fe819f33d479b8f5090ca9ac4eb1f4047c
--- /dev/null
+++ b/src/analytics/backend/requirements.in
@@ -0,0 +1,16 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+pyspark==3.5.2
+confluent-kafka==2.3.*
diff --git a/src/analytics/backend/service/AnalyticsBackendService.py b/src/analytics/backend/service/AnalyticsBackendService.py
new file mode 100755
index 0000000000000000000000000000000000000000..595603567fe537d9f7b33224cba0fe016a439631
--- /dev/null
+++ b/src/analytics/backend/service/AnalyticsBackendService.py
@@ -0,0 +1,132 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+import logging
+import threading
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from analytics.backend.service.SparkStreaming import SparkStreamer
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from confluent_kafka import Consumer as KafkaConsumer
+from confluent_kafka import KafkaError
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+
+
+LOGGER = logging.getLogger(__name__)
+
+class AnalyticsBackendService(GenericGrpcService):
+    """
+    Class that listens for analyzer requests on the Kafka ANALYTICS_REQUEST topic and
+    starts/stops the corresponding Spark streaming threads.
+ """ + def __init__(self, cls_name : str = __name__) -> None: + LOGGER.info('Init AnalyticsBackendService') + port = get_service_port_grpc(ServiceNameEnum.ANALYTICSBACKEND) + super().__init__(port, cls_name=cls_name) + self.running_threads = {} # To keep track of all running analyzers + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'analytics-frontend', + 'auto.offset.reset' : 'latest'}) + + def StartSparkStreamer(self, analyzer_uuid, analyzer): + kpi_list = analyzer['input_kpis'] + oper_list = [s.replace('_value', '') for s in list(analyzer["thresholds"].keys())] # TODO: update this line... + thresholds = analyzer['thresholds'] + window_size = analyzer['window_size'] + window_slider = analyzer['window_slider'] + print ("Received parameters: {:} - {:} - {:} - {:} - {:}".format( + kpi_list, oper_list, thresholds, window_size, window_slider)) + LOGGER.debug ("Received parameters: {:} - {:} - {:} - {:} - {:}".format( + kpi_list, oper_list, thresholds, window_size, window_slider)) + try: + stop_event = threading.Event() + thread = threading.Thread(target=SparkStreamer, + args=(analyzer_uuid, kpi_list, oper_list, thresholds, stop_event, + window_size, window_slider, None )) + self.running_threads[analyzer_uuid] = (thread, stop_event) + thread.start() + print ("Initiated Analyzer backend: {:}".format(analyzer_uuid)) + LOGGER.info("Initiated Analyzer backend: {:}".format(analyzer_uuid)) + return True + except Exception as e: + print ("Failed to initiate Analyzer backend: {:}".format(e)) + LOGGER.error("Failed to initiate Analyzer backend: {:}".format(e)) + return False + + def StopRequestListener(self, threadInfo: tuple): + try: + thread, stop_event = threadInfo + stop_event.set() + thread.join() + print ("Terminating Analytics backend RequestListener") + LOGGER.info("Terminating Analytics backend RequestListener") + return True + except Exception as e: + print ("Failed to terminate analytics backend {:}".format(e)) + LOGGER.error("Failed to terminate analytics backend {:}".format(e)) + return False + + def install_services(self): + stop_event = threading.Event() + thread = threading.Thread(target=self.RequestListener, + args=(stop_event,) ) + thread.start() + return (thread, stop_event) + + def RequestListener(self, stop_event): + """ + listener for requests on Kafka topic. + """ + consumer = self.kafka_consumer + consumer.subscribe([KafkaTopic.ANALYTICS_REQUEST.value]) + while not stop_event.is_set(): + receive_msg = consumer.poll(2.0) + if receive_msg is None: + continue + elif receive_msg.error(): + if receive_msg.error().code() == KafkaError._PARTITION_EOF: + continue + else: + print("Consumer error: {}".format(receive_msg.error())) + break + analyzer = json.loads(receive_msg.value().decode('utf-8')) + analyzer_uuid = receive_msg.key().decode('utf-8') + LOGGER.debug('Recevied Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer)) + print ('Recevied Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer)) + + if analyzer["algo_name"] is None and analyzer["oper_mode"] is None: + self.TerminateAnalyzerBackend(analyzer_uuid) + else: + self.StartSparkStreamer(analyzer_uuid, analyzer) + LOGGER.debug("Stop Event activated. Terminating...") + print ("Stop Event activated. 
Terminating...") + + def TerminateAnalyzerBackend(self, analyzer_uuid): + if analyzer_uuid in self.running_threads: + try: + thread, stop_event = self.running_threads[analyzer_uuid] + stop_event.set() + thread.join() + del self.running_threads[analyzer_uuid] + print ("Terminating backend (by TerminateBackend): Analyzer Id: {:}".format(analyzer_uuid)) + LOGGER.info("Terminating backend (by TerminateBackend): Analyzer Id: {:}".format(analyzer_uuid)) + return True + except Exception as e: + LOGGER.error("Failed to terminate. Analyzer Id: {:} - ERROR: {:}".format(analyzer_uuid, e)) + return False + else: + print ("Analyzer not found in active collectors. Analyzer Id: {:}".format(analyzer_uuid)) + LOGGER.warning("Analyzer not found in active collectors: Analyzer Id: {:}".format(analyzer_uuid)) + # generate confirmation towards frontend diff --git a/src/analytics/backend/service/SparkStreaming.py b/src/analytics/backend/service/SparkStreaming.py new file mode 100644 index 0000000000000000000000000000000000000000..96e1aa05d898ffdd23c533b74ee87fbf03f54576 --- /dev/null +++ b/src/analytics/backend/service/SparkStreaming.py @@ -0,0 +1,154 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
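+
+# Overview of the streaming pipeline implemented below:
+#   1. read the KPI value stream from Kafka (topic KafkaTopic.VALUE),
+#   2. parse each record as JSON with schema {time_stamp, kpi_id, kpi_value},
+#   3. keep only the KPIs under analysis and aggregate them over a sliding window
+#      (avg/min/max/first/last/stdev, as requested in oper_list),
+#   4. flag threshold crossings as <column>_THRESHOLD_FAIL / <column>_THRESHOLD_RAISE,
+#   5. publish the result back to Kafka (topic KafkaTopic.ANALYTICS_RESPONSE).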
+
+
+import logging, time
+from pyspark.sql import SparkSession
+from pyspark.sql.types import StructType, StructField, StringType, DoubleType, TimestampType
+from pyspark.sql.functions import from_json, col, window, avg, min, max, first, last, stddev, when, round
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+
+LOGGER = logging.getLogger(__name__)
+
+def DefiningSparkSession():
+    # Create a Spark session with the Kafka SQL connector (the connector version,
+    # 3.5.0 below, should track the installed PySpark version)
+    return SparkSession.builder \
+        .appName("Analytics") \
+        .config("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true") \
+        .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.5.0") \
+        .getOrCreate()
+
+def SettingKafkaConsumerParams():   # TODO: create get_kafka_consumer() in common with inputs (bootstrap server, subscribe, startingOffset and failOnDataLoss with default values)
+    return {
+        # "kafka.bootstrap.servers": '127.0.0.1:9092',
+        "kafka.bootstrap.servers": KafkaConfig.get_kafka_address(),
+        "subscribe"              : KafkaTopic.VALUE.value,
+        "startingOffsets"        : 'latest',
+        "failOnDataLoss"         : 'false'      # Optional: Set to "true" to fail the query on data loss
+    }
+
+def DefiningRequestSchema():
+    return StructType([
+            StructField("time_stamp", StringType() , True),
+            StructField("kpi_id"    , StringType() , True),
+            StructField("kpi_value" , DoubleType() , True)
+        ])
+
+def GetAggregations(oper_list):
+    # Define the possible aggregation functions
+    agg_functions = {
+        'avg'  : round(avg    ("kpi_value"), 3).alias("avg_value"),
+        'min'  : round(min    ("kpi_value"), 3).alias("min_value"),
+        'max'  : round(max    ("kpi_value"), 3).alias("max_value"),
+        'first': round(first  ("kpi_value"), 3).alias("first_value"),
+        'last' : round(last   ("kpi_value"), 3).alias("last_value"),
+        'stdev': round(stddev ("kpi_value"), 3).alias("stdev_value")
+    }
+    return [agg_functions[op] for op in oper_list if op in agg_functions]   # Filter and return only the selected aggregations
+
+def ApplyThresholds(aggregated_df, thresholds):
+    # Apply thresholds (TH-Fail and TH-RAISE) based on the thresholds dictionary on the aggregated DataFrame.
+
+    # Loop through each column name and its associated thresholds
+    for col_name, (fail_th, raise_th) in thresholds.items():
+        # Apply TH-Fail condition (if column value is less than the fail threshold)
+        aggregated_df = aggregated_df.withColumn(
+            f"{col_name}_THRESHOLD_FAIL",
+            when(col(col_name) < fail_th, True).otherwise(False)
+        )
+        # Apply TH-RAISE condition (if column value is greater than the raise threshold)
+        aggregated_df = aggregated_df.withColumn(
+            f"{col_name}_THRESHOLD_RAISE",
+            when(col(col_name) > raise_th, True).otherwise(False)
+        )
+    return aggregated_df
+
+def SparkStreamer(key, kpi_list, oper_list, thresholds, stop_event,
+                  window_size=None, win_slide_duration=None, time_stamp_col=None):
+    """
+    Method to perform Spark streaming operations on a Kafka stream.
+    NOTE: the Kafka topic to be processed should have at least one row before initiating the Spark session.
+ """ + kafka_consumer_params = SettingKafkaConsumerParams() # Define the Kafka consumer parameters + schema = DefiningRequestSchema() # Define the schema for the incoming JSON data + spark = DefiningSparkSession() # Define the spark session with app name and spark version + + # extra options default assignment + if window_size is None: window_size = "60 seconds" # default + if win_slide_duration is None: win_slide_duration = "30 seconds" # default + if time_stamp_col is None: time_stamp_col = "time_stamp" # default + + try: + # Read data from Kafka + raw_stream_data = spark \ + .readStream \ + .format("kafka") \ + .options(**kafka_consumer_params) \ + .load() + + # Convert the value column from Kafka to a string + stream_data = raw_stream_data.selectExpr("CAST(value AS STRING)") + # Parse the JSON string into a DataFrame with the defined schema + parsed_stream_data = stream_data.withColumn("parsed_value", from_json(col("value"), schema)) + # Select the parsed fields + final_stream_data = parsed_stream_data.select("parsed_value.*") + # Convert the time_stamp to proper timestamp (assuming it's in ISO format) + final_stream_data = final_stream_data.withColumn(time_stamp_col, col(time_stamp_col).cast(TimestampType())) + # Filter the stream to only include rows where the kpi_id is in the kpi_list + filtered_stream_data = final_stream_data.filter(col("kpi_id").isin(kpi_list)) + # Define a window for aggregation + windowed_stream_data = filtered_stream_data \ + .groupBy( + window( col(time_stamp_col), + window_size, slideDuration=win_slide_duration + ), + col("kpi_id") + ) \ + .agg(*GetAggregations(oper_list)) + # Apply thresholds to the aggregated data + thresholded_stream_data = ApplyThresholds(windowed_stream_data, thresholds) + + # --- This will write output on console: FOR TESTING PURPOSES + # Start the Spark streaming query + # query = thresholded_stream_data \ + # .writeStream \ + # .outputMode("update") \ + # .format("console") + + # --- This will write output to Kafka: ACTUAL IMPLEMENTATION + query = thresholded_stream_data \ + .selectExpr(f"'{key}' AS key", "to_json(struct(*)) AS value") \ + .writeStream \ + .format("kafka") \ + .option("kafka.bootstrap.servers", KafkaConfig.get_kafka_address()) \ + .option("topic", KafkaTopic.ANALYTICS_RESPONSE.value) \ + .option("checkpointLocation", "analytics/.spark/checkpoint") \ + .outputMode("update") + + # Start the query execution + queryHandler = query.start() + + # Loop to check for stop event flag. To be set by stop collector method. + while True: + if stop_event.is_set(): + LOGGER.debug("Stop Event activated. Terminating in 5 seconds...") + print ("Stop Event activated. Terminating in 5 seconds...") + time.sleep(5) + queryHandler.stop() + break + time.sleep(5) + + except Exception as e: + print("Error in Spark streaming process: {:}".format(e)) + LOGGER.debug("Error in Spark streaming process: {:}".format(e)) diff --git a/src/analytics/backend/service/__init__.py b/src/analytics/backend/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/analytics/backend/service/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/analytics/backend/service/__main__.py b/src/analytics/backend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4c36b7c7bd952164bf9e48a45e22fb00575564 --- /dev/null +++ b/src/analytics/backend/service/__main__.py @@ -0,0 +1,56 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port +from .AnalyticsBackendService import AnalyticsBackendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + grpc_service = AnalyticsBackendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/analytics/backend/tests/__init__.py b/src/analytics/backend/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/analytics/backend/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
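+
+# NOTE: the tests in this package are integration-style: they assume a reachable
+# Kafka broker (resolved through common.tools.kafka.Variables.KafkaConfig) and can
+# be run, for instance, with: pytest -v src/analytics/backend/tests/test_backend.py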
diff --git a/src/analytics/backend/tests/messages.py b/src/analytics/backend/tests/messages.py new file mode 100644 index 0000000000000000000000000000000000000000..9acd6ad9dffe4a5b10b107a6923ed85170ee141f --- /dev/null +++ b/src/analytics/backend/tests/messages.py @@ -0,0 +1,34 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def get_kpi_id_list(): + return ["6e22f180-ba28-4641-b190-2287bf448888", "1e22f180-ba28-4641-b190-2287bf446666"] + +def get_operation_list(): + return [ 'avg', 'max' ] # possibilities ['avg', 'min', 'max', 'first', 'last', 'stdev'] + +def get_threshold_dict(): + threshold_dict = { + 'avg_value' : (20, 30), + 'min_value' : (00, 10), + 'max_value' : (45, 50), + 'first_value' : (00, 10), + 'last_value' : (40, 50), + 'stdev_value' : (00, 10), + } + # Filter threshold_dict based on the operation_list + return { + op + '_value': threshold_dict[op+'_value'] for op in get_operation_list() if op + '_value' in threshold_dict + } diff --git a/src/analytics/backend/tests/test_backend.py b/src/analytics/backend/tests/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..2f40faba94ef7081db609116e8fd869e3d119a24 --- /dev/null +++ b/src/analytics/backend/tests/test_backend.py @@ -0,0 +1,64 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import time
+import logging
+import threading
+from common.tools.kafka.Variables import KafkaTopic
+from analytics.backend.service.AnalyticsBackendService import AnalyticsBackendService
+from analytics.backend.tests.messages import get_kpi_id_list, get_operation_list, get_threshold_dict
+
+LOGGER = logging.getLogger(__name__)
+
+
+###########################
+# Tests Implementation of Analytics Backend
+###########################
+
+# --- "test_validate_kafka_topics" should be run before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+# def test_StartRequestListener():
+#     LOGGER.info('test_RunRequestListener')
+#     AnalyticsBackendServiceObj = AnalyticsBackendService()
+#     response = AnalyticsBackendServiceObj.StartRequestListener()   # response is Tuple (thread, stop_event)
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, tuple)
+
+# To test START and STOP communication together
+def test_StopRequestListener():
+    LOGGER.info('test_StopRequestListener')
+    LOGGER.info('Initiating StartRequestListener...')
+    AnalyticsBackendServiceObj = AnalyticsBackendService()
+    response_thread = AnalyticsBackendServiceObj.StartRequestListener()   # response is Tuple (thread, stop_event)
+    # LOGGER.debug(str(response_thread))
+    time.sleep(10)
+    LOGGER.info('Initiating StopRequestListener...')
+    response = AnalyticsBackendServiceObj.StopRequestListener(response_thread)
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
+
+# To independently test the SparkListener functionality
+# def test_SparkListener():
+#     LOGGER.info('test_RunRequestListener')
+#     AnalyticsBackendServiceObj = AnalyticsBackendService()
+#     response = AnalyticsBackendServiceObj.RunSparkStreamer(
+#         get_kpi_id_list(), get_operation_list(), get_threshold_dict()
+#         )
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, bool)
diff --git a/src/analytics/database/AnalyzerEngine.py b/src/analytics/database/AnalyzerEngine.py
new file mode 100644
index 0000000000000000000000000000000000000000..9294e09966ef9e13c9cfa3cab590e5d0c8b6a80e
--- /dev/null
+++ b/src/analytics/database/AnalyzerEngine.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
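+
+# NOTE: the cockroachdb:// URI scheme below is handled by the sqlalchemy-cockroachdb
+# dialect declared in src/analytics/frontend/requirements.in; CRDB_URI, if set,
+# takes precedence over the individual CRDB_* settings.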
+
+import logging, sqlalchemy
+from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+
+class AnalyzerEngine:
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        crdb_uri = get_setting('CRDB_URI', default=None)
+        if crdb_uri is None:
+            CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
+            CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
+            CRDB_DATABASE  = "tfs-analyzer"     # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
+            CRDB_USERNAME  = get_setting('CRDB_USERNAME')
+            CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
+            CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
+            crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        try:
+            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
+            LOGGER.info(' AnalyzerDB initialized with DB URL: {:}'.format(crdb_uri))
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None # type: ignore
+        return engine
diff --git a/src/analytics/database/AnalyzerModel.py b/src/analytics/database/AnalyzerModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..c33e396e06a8dce96a86951a64aa59b510931dfe
--- /dev/null
+++ b/src/analytics/database/AnalyzerModel.py
@@ -0,0 +1,106 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
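+
+# This module maps the Analyzer gRPC message (analytics_frontend.proto) onto a
+# SQLAlchemy row of the 'analyzer' table, and provides the two conversions used
+# by the frontend: ConvertAnalyzerToRow (request -> row) and
+# ConvertRowToAnalyzer (row -> response).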
+
+import logging
+import enum
+
+from sqlalchemy import Column, String, Float, Enum, BigInteger, JSON
+from sqlalchemy.orm import registry
+from common.proto import analytics_frontend_pb2
+from common.proto import kpi_manager_pb2
+
+from sqlalchemy.dialects.postgresql import UUID, ARRAY
+
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+
+class AnalyzerOperationMode (enum.Enum):
+    BATCH     = analytics_frontend_pb2.AnalyzerOperationMode.ANALYZEROPERATIONMODE_BATCH
+    STREAMING = analytics_frontend_pb2.AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING
+
+class Analyzer(Base):
+    __tablename__ = 'analyzer'
+
+    analyzer_id          = Column( UUID(as_uuid=False)        , primary_key=True)
+    algorithm_name       = Column( String                     , nullable=False  )
+    input_kpi_ids        = Column( ARRAY(UUID(as_uuid=False)) , nullable=False  )
+    output_kpi_ids       = Column( ARRAY(UUID(as_uuid=False)) , nullable=False  )
+    operation_mode       = Column( Enum(AnalyzerOperationMode), nullable=False  )
+    parameters           = Column( JSON                       , nullable=True   )
+    batch_min_duration_s = Column( Float                      , nullable=False  )
+    batch_max_duration_s = Column( Float                      , nullable=False  )
+    batch_min_size       = Column( BigInteger                 , nullable=False  )
+    batch_max_size       = Column( BigInteger                 , nullable=False  )
+
+    # helps in logging the information
+    def __repr__(self):
+        return (f"<Analyzer(analyzer_id='{self.analyzer_id}', algorithm_name='{self.algorithm_name}', "
+                f"input_kpi_ids={self.input_kpi_ids}, output_kpi_ids={self.output_kpi_ids}, "
+                f"operation_mode='{self.operation_mode}', parameters={self.parameters}, "
+                f"batch_min_duration_s={self.batch_min_duration_s}, batch_max_duration_s={self.batch_max_duration_s}, "
+                f"batch_min_size={self.batch_min_size}, batch_max_size={self.batch_max_size})>")
+
+
+    @classmethod
+    def ConvertAnalyzerToRow(cls, request):
+        """
+        Create an instance of an Analyzer table row from a request object.
+        Args:    request: The request object containing the analyzer gRPC message.
+        Returns: A row (an instance of the Analyzer table) initialized with the content of the request.
+        """
+        return cls(
+            analyzer_id          = request.analyzer_id.analyzer_id.uuid,
+            algorithm_name       = request.algorithm_name,
+            input_kpi_ids        = [k.kpi_id.uuid for k in request.input_kpi_ids],
+            output_kpi_ids       = [k.kpi_id.uuid for k in request.output_kpi_ids],
+            operation_mode       = AnalyzerOperationMode(request.operation_mode),   # converts integer to corresponding Enum class member
+            parameters           = dict(request.parameters),
+            batch_min_duration_s = request.batch_min_duration_s,
+            batch_max_duration_s = request.batch_max_duration_s,
+            batch_min_size       = request.batch_min_size,
+            batch_max_size       = request.batch_max_size
+        )
+
+    @classmethod
+    def ConvertRowToAnalyzer(cls, row):
+        """
+        Create and return an Analyzer gRPC message initialized with the content of a row.
+        Args:    row: The Analyzer table instance (row) containing the data.
+        Returns: An Analyzer gRPC message initialized with the content of the row.
+ """ + # Create an instance of the Analyzer message + response = analytics_frontend_pb2.Analyzer() + response.analyzer_id.analyzer_id.uuid = row.analyzer_id + response.algorithm_name = row.algorithm_name + response.operation_mode = row.operation_mode.value + response.parameters.update(row.parameters) + + for input_kpi_id in row.input_kpi_ids: + _kpi_id = kpi_manager_pb2.KpiId() + _kpi_id.kpi_id.uuid = input_kpi_id + response.input_kpi_ids.append(_kpi_id) + for output_kpi_id in row.output_kpi_ids: + _kpi_id = kpi_manager_pb2.KpiId() + _kpi_id.kpi_id.uuid = output_kpi_id + response.output_kpi_ids.append(_kpi_id) + + response.batch_min_duration_s = row.batch_min_duration_s + response.batch_max_duration_s = row.batch_max_duration_s + response.batch_min_size = row.batch_min_size + response.batch_max_size = row.batch_max_size + return response diff --git a/src/analytics/database/Analyzer_DB.py b/src/analytics/database/Analyzer_DB.py new file mode 100644 index 0000000000000000000000000000000000000000..1ba68989a066e4638adc12e65289ed50b740731d --- /dev/null +++ b/src/analytics/database/Analyzer_DB.py @@ -0,0 +1,150 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import sqlalchemy_utils + +from sqlalchemy import inspect, or_ +from sqlalchemy.orm import sessionmaker + +from analytics.database.AnalyzerModel import Analyzer as AnalyzerModel +from analytics.database.AnalyzerEngine import AnalyzerEngine +from common.method_wrappers.ServiceExceptions import (OperationFailedException, AlreadyExistsException) + +LOGGER = logging.getLogger(__name__) +DB_NAME = "tfs_analyzer" # TODO: export name from enviornment variable + +class AnalyzerDB: + def __init__(self): + self.db_engine = AnalyzerEngine.get_engine() + if self.db_engine is None: + LOGGER.error('Unable to get SQLAlchemy DB Engine...') + return False + self.db_name = DB_NAME + self.Session = sessionmaker(bind=self.db_engine) + + def create_database(self): + if not sqlalchemy_utils.database_exists(self.db_engine.url): + LOGGER.debug("Database created. {:}".format(self.db_engine.url)) + sqlalchemy_utils.create_database(self.db_engine.url) + + def drop_database(self) -> None: + if sqlalchemy_utils.database_exists(self.db_engine.url): + sqlalchemy_utils.drop_database(self.db_engine.url) + + def create_tables(self): + try: + AnalyzerModel.metadata.create_all(self.db_engine) # type: ignore + LOGGER.debug("Tables created in the database: {:}".format(self.db_name)) + except Exception as e: + LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e))) + raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)]) + + def verify_tables(self): + try: + inspect_object = inspect(self.db_engine) + if(inspect_object.has_table('analyzer', None)): + LOGGER.info("Table exists in DB: {:}".format(self.db_name)) + except Exception as e: + LOGGER.info("Unable to fetch Table names. 
{:s}".format(str(e))) + +# ----------------- CURD OPERATIONS --------------------- + + def add_row_to_db(self, row): + session = self.Session() + try: + session.add(row) + session.commit() + LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.") + return True + except Exception as e: + session.rollback() + if "psycopg2.errors.UniqueViolation" in str(e): + LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}") + raise AlreadyExistsException(row.__class__.__name__, row, + extra_details=["Unique key voilation: {:}".format(e)] ) + else: + LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def search_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + entity = session.query(model).filter_by(**{col_name: id_to_search}).first() + if entity: + # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}") + return entity + else: + LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}") + print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search)) + return None + except Exception as e: + session.rollback() + LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}") + raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)]) + finally: + session.close() + + def delete_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + record = session.query(model).filter_by(**{col_name: id_to_search}).first() + if record: + session.delete(record) + session.commit() + LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search) + else: + LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search) + return None + except Exception as e: + session.rollback() + LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e) + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def select_with_filter(self, model, filter_object): + session = self.Session() + try: + query = session.query(AnalyzerModel) + + # Apply filters based on the filter_object + if filter_object.analyzer_id: + query = query.filter(AnalyzerModel.analyzer_id.in_([a.analyzer_id.uuid for a in filter_object.analyzer_id])) + + if filter_object.algorithm_names: + query = query.filter(AnalyzerModel.algorithm_name.in_(filter_object.algorithm_names)) + + if filter_object.input_kpi_ids: + input_kpi_uuids = [k.kpi_id.uuid for k in filter_object.input_kpi_ids] + query = query.filter(AnalyzerModel.input_kpi_ids.op('&&')(input_kpi_uuids)) + + if filter_object.output_kpi_ids: + output_kpi_uuids = [k.kpi_id.uuid for k in filter_object.output_kpi_ids] + query = query.filter(AnalyzerModel.output_kpi_ids.op('&&')(output_kpi_uuids)) + + result = query.all() + # query should be added to return all rows + if result: + LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") # - Results: {result} + else: + LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}") + return result + except Exception as e: + LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}") + raise OperationFailedException 
("Select by filter", extra_details=["unable to apply the filter {:}".format(e)]) + finally: + session.close() diff --git a/src/analytics/database/__init__.py b/src/analytics/database/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/database/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/analytics/frontend/Dockerfile b/src/analytics/frontend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..10499713f318a23e1aeab49c96e8163a5ec147fa --- /dev/null +++ b/src/analytics/frontend/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . 
\1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/analytics/frontend +WORKDIR /var/teraflow/analytics/frontend +COPY src/analytics/frontend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/analytics/__init__.py analytics/__init__.py +COPY src/analytics/frontend/. analytics/frontend/ +COPY src/analytics/database/. analytics/database/ + +# Start the service +ENTRYPOINT ["python", "-m", "analytics.frontend.service"] diff --git a/src/analytics/frontend/__init__.py b/src/analytics/frontend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/analytics/frontend/client/AnalyticsFrontendClient.py b/src/analytics/frontend/client/AnalyticsFrontendClient.py new file mode 100644 index 0000000000000000000000000000000000000000..90e95d661d46f24ae5ffaeb7bcfa19b7e1f36526 --- /dev/null +++ b/src/analytics/frontend/client/AnalyticsFrontendClient.py @@ -0,0 +1,68 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
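+
+# Minimal usage sketch (assuming the frontend is reachable through the standard
+# ANALYTICSFRONTEND host/port settings):
+#
+#   client      = AnalyticsFrontendClient()
+#   analyzer_id = client.StartAnalyzer(analyzer)   # 'analyzer' is an Analyzer message
+#   client.StopAnalyzer(analyzer_id)
+#   client.close()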
+ +import grpc, logging +from common.Constants import ServiceNameEnum +from common.proto.context_pb2 import Empty +from common.proto.analytics_frontend_pb2_grpc import AnalyticsFrontendServiceStub +from common.proto.analytics_frontend_pb2 import AnalyzerId, Analyzer, AnalyzerFilter, AnalyzerList +from common.Settings import get_service_host, get_service_port_grpc +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.client.RetryDecorator import retry, delay_exponential + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 10 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class AnalyticsFrontendClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.ANALYTICSFRONTEND) + if not port: port = get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = AnalyticsFrontendServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def StartAnalyzer (self, request: Analyzer) -> AnalyzerId: #type: ignore + LOGGER.debug('StartAnalyzer: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.StartAnalyzer(request) + LOGGER.debug('StartAnalyzer result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def StopAnalyzer(self, request : AnalyzerId) -> Empty: # type: ignore + LOGGER.debug('StopAnalyzer: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.StopAnalyzer(request) + LOGGER.debug('StopAnalyzer result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SelectAnalyzers(self, request : AnalyzerFilter) -> AnalyzerList: # type: ignore + LOGGER.debug('SelectAnalyzers: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SelectAnalyzers(request) + LOGGER.debug('SelectAnalyzers result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/analytics/frontend/client/__init__.py b/src/analytics/frontend/client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/analytics/frontend/requirements.in b/src/analytics/frontend/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..d81b9ddbeafeff94c830d48ca5594e775b9ce240 --- /dev/null +++ b/src/analytics/frontend/requirements.in @@ -0,0 +1,20 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apscheduler==3.10.4 +confluent-kafka==2.3.* +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* diff --git a/src/analytics/frontend/service/AnalyticsFrontendService.py b/src/analytics/frontend/service/AnalyticsFrontendService.py new file mode 100644 index 0000000000000000000000000000000000000000..42a7fc9b60418c1c0fc5af6f320ae5c330ce8871 --- /dev/null +++ b/src/analytics/frontend/service/AnalyticsFrontendService.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.service.GenericGrpcService import GenericGrpcService +from common.proto.analytics_frontend_pb2_grpc import add_AnalyticsFrontendServiceServicer_to_server +from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl + +class AnalyticsFrontendService(GenericGrpcService): + def __init__(self, cls_name: str = __name__): + port = get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND) + super().__init__(port, cls_name=cls_name) + self.analytics_frontend_servicer = AnalyticsFrontendServiceServicerImpl() + + def install_servicers(self): + add_AnalyticsFrontendServiceServicer_to_server(self.analytics_frontend_servicer, self.server) diff --git a/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..8bb6a17afb5b911e3652fdb8d1853b5b7bc6faf3 --- /dev/null +++ b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py @@ -0,0 +1,214 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, grpc, json, queue + +from typing import Dict +from confluent_kafka import Consumer as KafkaConsumer +from confluent_kafka import Producer as KafkaProducer +from confluent_kafka import KafkaError + +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +from common.proto.context_pb2 import Empty +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.proto.analytics_frontend_pb2 import Analyzer, AnalyzerId, AnalyzerFilter, AnalyzerList +from common.proto.analytics_frontend_pb2_grpc import AnalyticsFrontendServiceServicer +from analytics.database.Analyzer_DB import AnalyzerDB +from analytics.database.AnalyzerModel import Analyzer as AnalyzerModel +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.interval import IntervalTrigger + +LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('AnalyticsFrontend', 'NBIgRPC') + +class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer): + def __init__(self): + LOGGER.info('Init AnalyticsFrontendService') + self.listener_topic = KafkaTopic.ANALYTICS_RESPONSE.value + self.db_obj = AnalyzerDB() + self.result_queue = queue.Queue() + self.scheduler = BackgroundScheduler() + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'analytics-frontend', + 'auto.offset.reset' : 'latest'}) + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def StartAnalyzer(self, + request : Analyzer, grpc_context: grpc.ServicerContext # type: ignore + ) -> AnalyzerId: # type: ignore + LOGGER.info ("At Service gRPC message: {:}".format(request)) + response = AnalyzerId() + + self.db_obj.add_row_to_db( + AnalyzerModel.ConvertAnalyzerToRow(request) + ) + self.PublishStartRequestOnKafka(request) + + response.analyzer_id.uuid = request.analyzer_id.analyzer_id.uuid + return response + + def PublishStartRequestOnKafka(self, analyzer_obj): + """ + Method to generate analyzer request on Kafka. 
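+        The request is produced on KafkaTopic.ANALYTICS_REQUEST, keyed by the
+        analyzer UUID, with a JSON value carrying algo_name, input_kpis,
+        output_kpis, oper_mode, thresholds, window_size and window_slider.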
+ """ + analyzer_uuid = analyzer_obj.analyzer_id.analyzer_id.uuid + analyzer_to_generate : Dict = { + "algo_name" : analyzer_obj.algorithm_name, + "input_kpis" : [k.kpi_id.uuid for k in analyzer_obj.input_kpi_ids], + "output_kpis" : [k.kpi_id.uuid for k in analyzer_obj.output_kpi_ids], + "oper_mode" : analyzer_obj.operation_mode, + "thresholds" : json.loads(analyzer_obj.parameters["thresholds"]), + "window_size" : analyzer_obj.parameters["window_size"], + "window_slider" : analyzer_obj.parameters["window_slider"], + # "store_aggregate" : analyzer_obj.parameters["store_aggregate"] + } + self.kafka_producer.produce( + KafkaTopic.ANALYTICS_REQUEST.value, + key = analyzer_uuid, + value = json.dumps(analyzer_to_generate), + callback = self.delivery_callback + ) + LOGGER.info("Analyzer Start Request Generated: Analyzer Id: {:}, Value: {:}".format(analyzer_uuid, analyzer_to_generate)) + self.kafka_producer.flush() + + # self.StartResponseListener(analyzer_uuid) + + def StartResponseListener(self, filter_key=None): + """ + Start the Kafka response listener with APScheduler and return key-value pairs periodically. + """ + LOGGER.info("Starting StartResponseListener") + # Schedule the ResponseListener at fixed intervals + self.scheduler.add_job( + self.response_listener, + trigger=IntervalTrigger(seconds=5), + args=[filter_key], + id=f"response_listener_{self.listener_topic}", + replace_existing=True + ) + self.scheduler.start() + LOGGER.info(f"Started Kafka listener for topic {self.listener_topic}...") + try: + while True: + LOGGER.info("entering while...") + key, value = self.result_queue.get() # Wait until a result is available + LOGGER.info("In while true ...") + yield key, value # Yield the result to the calling function + except KeyboardInterrupt: + LOGGER.warning("Listener stopped manually.") + finally: + self.StopListener() + + def response_listener(self, filter_key=None): + """ + Poll Kafka messages and put key-value pairs into the queue. + """ + LOGGER.info(f"Polling Kafka topic {self.listener_topic}...") + + consumer = self.kafka_consumer + consumer.subscribe([self.listener_topic]) + msg = consumer.poll(2.0) + if msg is None: + return + elif msg.error(): + if msg.error().code() != KafkaError._PARTITION_EOF: + LOGGER.error(f"Kafka error: {msg.error()}") + return + + try: + key = msg.key().decode('utf-8') if msg.key() else None + if filter_key is not None and key == filter_key: + value = json.loads(msg.value().decode('utf-8')) + LOGGER.info(f"Received key: {key}, value: {value}") + self.result_queue.put((key, value)) + else: + LOGGER.info(f"Skipping message with unmatched key: {key}") + # value = json.loads(msg.value().decode('utf-8')) # Added for debugging + # self.result_queue.put((filter_key, value)) # Added for debugging + except Exception as e: + LOGGER.error(f"Error processing Kafka message: {e}") + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def StopAnalyzer(self, + request : AnalyzerId, grpc_context: grpc.ServicerContext # type: ignore + ) -> Empty: # type: ignore + LOGGER.info ("At Service gRPC message: {:}".format(request)) + try: + analyzer_id_to_delete = request.analyzer_id.uuid + self.db_obj.delete_db_row_by_id( + AnalyzerModel, "analyzer_id", analyzer_id_to_delete + ) + self.PublishStopRequestOnKafka(analyzer_id_to_delete) + except Exception as e: + LOGGER.error('Unable to delete analyzer. Error: {:}'.format(e)) + return Empty() + + def PublishStopRequestOnKafka(self, analyzer_uuid): + """ + Method to generate stop analyzer request on Kafka. 
+ """ + # analyzer_uuid = analyzer_id.analyzer_id.uuid + analyzer_to_stop : Dict = { + "algo_name" : None, + "input_kpis" : [], + "output_kpis" : [], + "oper_mode" : None + } + self.kafka_producer.produce( + KafkaTopic.ANALYTICS_REQUEST.value, + key = analyzer_uuid, + value = json.dumps(analyzer_to_stop), + callback = self.delivery_callback + ) + LOGGER.info("Analyzer Stop Request Generated: Analyzer Id: {:}".format(analyzer_uuid)) + self.kafka_producer.flush() + self.StopListener() + + def StopListener(self): + """ + Gracefully stop the Kafka listener and the scheduler. + """ + LOGGER.info("Stopping Kafka listener...") + self.scheduler.shutdown() + LOGGER.info("Kafka listener stopped.") + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def SelectAnalyzers(self, + filter : AnalyzerFilter, contextgrpc_context: grpc.ServicerContext # type: ignore + ) -> AnalyzerList: # type: ignore + LOGGER.info("At Service gRPC message: {:}".format(filter)) + response = AnalyzerList() + try: + rows = self.db_obj.select_with_filter(AnalyzerModel, filter) + try: + for row in rows: + response.analyzer_list.append( + AnalyzerModel.ConvertRowToAnalyzer(row) + ) + return response + except Exception as e: + LOGGER.info('Unable to process filter response {:}'.format(e)) + except Exception as e: + LOGGER.error('Unable to apply filter on table {:}. ERROR: {:}'.format(AnalyzerModel.__name__, e)) + + + def delivery_callback(self, err, msg): + if err: + LOGGER.debug('Message delivery failed: {:}'.format(err)) + print ('Message delivery failed: {:}'.format(err)) + # else: + # LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) + # print('Message delivered to topic {:}'.format(msg.topic())) diff --git a/src/analytics/frontend/service/__init__.py b/src/analytics/frontend/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/service/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/analytics/frontend/service/__main__.py b/src/analytics/frontend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c331844f45d98095ef98951f3db43a0e2f0c69c --- /dev/null +++ b/src/analytics/frontend/service/__main__.py @@ -0,0 +1,56 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port +from .AnalyticsFrontendService import AnalyticsFrontendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + grpc_service = AnalyticsFrontendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/analytics/frontend/tests/__init__.py b/src/analytics/frontend/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/analytics/frontend/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/analytics/frontend/tests/messages.py b/src/analytics/frontend/tests/messages.py new file mode 100644 index 0000000000000000000000000000000000000000..646de962e8a213582fdb7cd1446ab57bda561a96 --- /dev/null +++ b/src/analytics/frontend/tests/messages.py @@ -0,0 +1,84 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
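+
+# Helpers that build sample protobuf messages for the frontend tests. Note that
+# 'thresholds' is stored as a JSON-encoded string inside Analyzer.parameters,
+# matching the json.loads() performed by PublishStartRequestOnKafka.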
+ +import uuid +import json +from common.proto.kpi_manager_pb2 import KpiId +from common.proto.analytics_frontend_pb2 import ( AnalyzerOperationMode, AnalyzerId, + Analyzer, AnalyzerFilter ) + +def create_analyzer_id(): + _create_analyzer_id = AnalyzerId() + # _create_analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) + _create_analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6" + return _create_analyzer_id + +def create_analyzer(): + _create_analyzer = Analyzer() + # _create_analyzer.analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) + _create_analyzer.analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6" + _create_analyzer.algorithm_name = "Test_Aggergate_and_Threshold" + _create_analyzer.operation_mode = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING + + _kpi_id = KpiId() + # input IDs to analyze + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf448888" + _create_analyzer.input_kpi_ids.append(_kpi_id) + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _kpi_id.kpi_id.uuid = "1e22f180-ba28-4641-b190-2287bf446666" + _create_analyzer.input_kpi_ids.append(_kpi_id) + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_analyzer.input_kpi_ids.append(_kpi_id) + # output IDs after analysis + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_analyzer.output_kpi_ids.append(_kpi_id) + _kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_analyzer.output_kpi_ids.append(_kpi_id) + # parameter + _threshold_dict = { + # 'avg_value' :(20, 30), 'min_value' :(00, 10), 'max_value' :(45, 50), + 'first_value' :(00, 10), 'last_value' :(40, 50), 'stdev_value':(00, 10)} + _create_analyzer.parameters['thresholds'] = json.dumps(_threshold_dict) + _create_analyzer.parameters['window_size'] = "60 seconds" # Such as "10 seconds", "2 minutes", "3 hours", "4 days" or "5 weeks" + _create_analyzer.parameters['window_slider'] = "30 seconds" # should be less than window size + _create_analyzer.parameters['store_aggregate'] = str(False) # TRUE to store. 
Not implemented yet
+
+    return _create_analyzer
+
+def create_analyzer_filter():
+    _create_analyzer_filter = AnalyzerFilter()
+
+    _analyzer_id_obj = AnalyzerId()
+    # _analyzer_id_obj.analyzer_id.uuid = str(uuid.uuid4())
+    _analyzer_id_obj.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    _create_analyzer_filter.analyzer_id.append(_analyzer_id_obj)
+
+    _create_analyzer_filter.algorithm_names.append('Test_Aggergate_and_Threshold')
+
+    # _input_kpi_id_obj = KpiId()
+    # _input_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4())
+    # _create_analyzer_filter.input_kpi_ids.append(_input_kpi_id_obj)
+    # another input kpi Id
+    # _input_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4())
+    # _create_analyzer_filter.input_kpi_ids.append(_input_kpi_id_obj)
+
+    # _output_kpi_id_obj = KpiId()
+    # _output_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4())
+    # _create_analyzer_filter.output_kpi_ids.append(_output_kpi_id_obj)
+    # # another output kpi Id
+    # _output_kpi_id_obj.kpi_id.uuid = str(uuid.uuid4())
+    # _create_analyzer_filter.input_kpi_ids.append(_output_kpi_id_obj)
+
+    return _create_analyzer_filter
diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2428c01fb021f71a884d9a99c446bfef6e66559
--- /dev/null
+++ b/src/analytics/frontend/tests/test_frontend.py
@@ -0,0 +1,134 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
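+
+# NOTE: these are integration-style tests: they presumably require a reachable
+# Kafka broker (for topic creation) and a CockroachDB instance (used by the
+# servicer through AnalyzerDB).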
+
+import os
+import time
+import json
+import pytest
+import logging
+import threading
+
+from common.Constants import ServiceNameEnum
+from common.proto.context_pb2 import Empty
+from common.Settings import ( get_service_port_grpc, get_env_var_name,
+                              ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC )
+
+from common.tools.kafka.Variables import KafkaTopic
+from common.proto.analytics_frontend_pb2 import AnalyzerId, AnalyzerList
+from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient
+from analytics.frontend.service.AnalyticsFrontendService import AnalyticsFrontendService
+from analytics.frontend.tests.messages import ( create_analyzer_id, create_analyzer,
+                                                create_analyzer_filter )
+from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl
+from apscheduler.schedulers.background import BackgroundScheduler
+from apscheduler.triggers.interval import IntervalTrigger
+
+
+###########################
+# Tests Setup
+###########################
+
+LOCAL_HOST = '127.0.0.1'
+
+ANALYTICS_FRONTEND_PORT = str(get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND))
+os.environ[get_env_var_name(ServiceNameEnum.ANALYTICSFRONTEND, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.ANALYTICSFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(ANALYTICS_FRONTEND_PORT)
+
+LOGGER = logging.getLogger(__name__)
+
+@pytest.fixture(scope='session')
+def analyticsFrontend_service():
+    LOGGER.info('Initializing AnalyticsFrontendService...')
+
+    _service = AnalyticsFrontendService()
+    _service.start()
+
+    # yield the server; when the test finishes, execution will resume to stop it
+    LOGGER.info('Yielding AnalyticsFrontendService...')
+    yield _service
+
+    LOGGER.info('Terminating AnalyticsFrontendService...')
+    _service.stop()
+
+    LOGGER.info('Terminated AnalyticsFrontendService...')
+
+@pytest.fixture(scope='session')
+def analyticsFrontend_client(analyticsFrontend_service : AnalyticsFrontendService):
+    LOGGER.info('Initializing AnalyticsFrontendClient...')
+
+    _client = AnalyticsFrontendClient()
+
+    # yield the client; when the test finishes, execution will resume to close it
+    LOGGER.info('Yielding AnalyticsFrontendClient...')
+    yield _client
+
+    LOGGER.info('Closing AnalyticsFrontendClient...')
+    _client.close()
+
+    LOGGER.info('Closed AnalyticsFrontendClient...')
+
+
+###########################
+# Tests Implementation of Analytics Frontend
+###########################
+
+# --- "test_validate_kafka_topics" should be executed before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+# ----- core functionality test -----
+# def test_StartAnalytics(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_StartAnalytic START: <<< ')
+#     response = analyticsFrontend_client.StartAnalyzer(create_analyzer())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, AnalyzerId)
+
+# To test start and stop listener together
+def test_StartStopAnalyzers(analyticsFrontend_client):
+    LOGGER.info(' >>> test_StartStopAnalyzers START: <<< ')
+    LOGGER.info('--> StartAnalyzer')
+    added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer())
+    LOGGER.debug(str(added_analyzer_id))
+    LOGGER.info(' --> Calling StartResponseListener... ')
+    class_obj = AnalyticsFrontendServiceServicerImpl()
+    response = class_obj.StartResponseListener(added_analyzer_id.analyzer_id.uuid)
+    LOGGER.debug(response)
+    LOGGER.info("waiting for timer to complete ...")
+    time.sleep(3)
+    LOGGER.info('--> StopAnalyzer')
+    response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id)
+    LOGGER.debug(str(response))
+
+# def test_SelectAnalytics(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_SelectAnalytics START: <<< ')
+#     response = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, AnalyzerList)
+
+# def test_StopAnalytic(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_StopAnalytic START: <<< ')
+#     response = analyticsFrontend_client.StopAnalyzer(create_analyzer_id())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, Empty)
+
+# def test_ResponseListener():
+#     LOGGER.info(' >>> test_ResponseListener START <<< ')
+#     analyzer_id = create_analyzer_id()
+#     LOGGER.debug("Starting Response Listener for Analyzer ID: {:}".format(analyzer_id.analyzer_id.uuid))
+#     class_obj = AnalyticsFrontendServiceServicerImpl()
+#     for response in class_obj.StartResponseListener(analyzer_id.analyzer_id.uuid):
+#         LOGGER.debug(response)
+#         assert isinstance(response, tuple)
\ No newline at end of file
diff --git a/src/analytics/requirements.in b/src/analytics/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..8ff30ddaad25c39713f2e6f68c8d9aebed74dad0
--- /dev/null
+++ b/src/analytics/requirements.in
@@ -0,0 +1,21 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#java==11.0.*  # NOTE: Java is a system-level dependency of pyspark; it cannot be installed from PyPI
+pyspark==3.5.2
+confluent-kafka==2.3.*
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/analytics/tests/__init__.py b/src/analytics/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/analytics/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
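As noted in requirements.in above, pyspark drives a JVM that pip cannot provide; the pinned `java==11.0.*` suggests Java 11 is the intended runtime. A hedged pre-flight check (the helper below is illustrative, not part of the codebase):

```python
import shutil, subprocess

def java_available() -> bool:
    # pip cannot install Java; pyspark requires a JRE already present on the host.
    java = shutil.which('java')
    if java is None:
        return False
    # `java -version` prints to stderr by convention.
    result = subprocess.run([java, '-version'], capture_output=True, text=True)
    return result.returncode == 0
```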
+ diff --git a/src/analytics/tests/test_analytics_db.py b/src/analytics/tests/test_analytics_db.py new file mode 100644 index 0000000000000000000000000000000000000000..58e7d0167044bb461e66b053dcb3999641ea8419 --- /dev/null +++ b/src/analytics/tests/test_analytics_db.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from analytics.database.Analyzer_DB import AnalyzerDB + +LOGGER = logging.getLogger(__name__) + +def test_verify_databases_and_tables(): + LOGGER.info('>>> test_verify_databases_and_tables : START <<< ') + AnalyzerDBobj = AnalyzerDB() + # AnalyzerDBobj.drop_database() + # AnalyzerDBobj.verify_tables() + AnalyzerDBobj.create_database() + AnalyzerDBobj.create_tables() + AnalyzerDBobj.verify_tables() diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md b/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md index 88f7a7bd5c7a268857a7a4ec2642c388daf715d3..f4faae268f75f96223b4c74571de695fada11497 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md +++ b/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md @@ -1,4 +1,4 @@ -<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md b/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md index 9b0c48c8ed24fe8ca5c06f118b3d440653c686e5..c7e975e864b042a1a4190f6090d5ed2ccee8ebf0 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md +++ b/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md @@ -1,4 +1,4 @@ -<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md b/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md index 452050b65106b8393ac8a7df98ea472b7705e608..ac9143d153d48d713210662249ffc15b833b4c83 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md +++ b/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md @@ -1,4 +1,4 @@ -<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
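The database test above doubles as a bootstrap recipe for the analytics schema. A minimal standalone sketch, assuming the same `AnalyzerDB` API exercised by the test:

```python
import logging
from analytics.database.Analyzer_DB import AnalyzerDB

logging.basicConfig(level=logging.DEBUG)

db = AnalyzerDB()
db.create_database()       # same bootstrap sequence as test_verify_databases_and_tables
db.create_tables()
assert db.verify_tables()
# db.drop_database()       # only for a clean teardown; destroys all analyzer state
```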
diff --git a/src/common/Constants.py b/src/common/Constants.py index de9ac45a4089a7847c37ceeeeab000f51566a3a3..74490321f9c8ec016fa4b48b583e2217c61710ec 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -58,9 +58,16 @@ class ServiceNameEnum(Enum): CACHING = 'caching' TE = 'te' FORECASTER = 'forecaster' - E2EORCHESTRATOR = 'e2eorchestrator' + E2EORCHESTRATOR = 'e2e-orchestrator' OPTICALCONTROLLER = 'opticalcontroller' BGPLS = 'bgpls-speaker' + KPIMANAGER = 'kpi-manager' + KPIVALUEAPI = 'kpi-value-api' + KPIVALUEWRITER = 'kpi-value-writer' + TELEMETRYFRONTEND = 'telemetry-frontend' + TELEMETRYBACKEND = 'telemetry-backend' + ANALYTICSFRONTEND = 'analytics-frontend' + ANALYTICSBACKEND = 'analytics-backend' # Used for test and debugging only DLT_GATEWAY = 'dltgateway' @@ -90,6 +97,13 @@ DEFAULT_SERVICE_GRPC_PORTS = { ServiceNameEnum.E2EORCHESTRATOR .value : 10050, ServiceNameEnum.OPTICALCONTROLLER .value : 10060, ServiceNameEnum.BGPLS .value : 20030, + ServiceNameEnum.KPIMANAGER .value : 30010, + ServiceNameEnum.KPIVALUEAPI .value : 30020, + ServiceNameEnum.KPIVALUEWRITER .value : 30030, + ServiceNameEnum.TELEMETRYFRONTEND .value : 30050, + ServiceNameEnum.TELEMETRYBACKEND .value : 30060, + ServiceNameEnum.ANALYTICSFRONTEND .value : 30080, + ServiceNameEnum.ANALYTICSBACKEND .value : 30090, # Used for test and debugging only ServiceNameEnum.DLT_GATEWAY .value : 50051, diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py index 9ed321d5328aa17a856a3a6401bc35576eef679f..23ebe19d681bd0ba774c8f3f4435c233369d0e28 100644 --- a/src/common/DeviceTypes.py +++ b/src/common/DeviceTypes.py @@ -47,6 +47,7 @@ class DeviceTypeEnum(Enum): PACKET_ROUTER = 'packet-router' PACKET_SWITCH = 'packet-switch' XR_CONSTELLATION = 'xr-constellation' + QKD_NODE = 'qkd-node' # ETSI TeraFlowSDN controller TERAFLOWSDN_CONTROLLER = 'teraflowsdn' diff --git a/src/common/Settings.py b/src/common/Settings.py index edc74c776d7818468c0162d26b03698aa3ef25ef..eaeb363adc1d9eadb9ddb0487abef8a0885ce380 100644 --- a/src/common/Settings.py +++ b/src/common/Settings.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging, os, time +import logging, os, re, time from typing import Dict, List from common.Constants import ( DEFAULT_GRPC_BIND_ADDRESS, DEFAULT_GRPC_GRACE_PERIOD, DEFAULT_GRPC_MAX_WORKERS, DEFAULT_HTTP_BIND_ADDRESS, @@ -68,7 +68,8 @@ def get_setting(name, **kwargs): raise Exception('Setting({:s}) not specified in environment or configuration'.format(str(name))) def get_env_var_name(service_name : ServiceNameEnum, env_var_group): - return ('{:s}SERVICE_{:s}'.format(service_name.value, env_var_group)).upper() + service_name = re.sub(r'[^a-zA-Z0-9]', '_', service_name.value) + return ('{:s}SERVICE_{:s}'.format(service_name, env_var_group)).upper() def get_service_host(service_name : ServiceNameEnum): envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_HOST) diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py index 71b3999bf6e42c3cd9130747af2cdcbe2d9a570e..d86a769ef8f2ab120b42d0b12f93530e8c71c2a3 100644 --- a/src/common/method_wrappers/Decorator.py +++ b/src/common/method_wrappers/Decorator.py @@ -235,3 +235,35 @@ def safe_and_metered_rpc_method(metrics_pool : MetricsPool, logger : logging.Log grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) return inner_wrapper return outer_wrapper + +def safe_and_metered_rpc_method_async(metrics_pool: MetricsPool, logger: logging.Logger): + def outer_wrapper(func): + method_name = func.__name__ + metrics = metrics_pool.get_metrics(method_name) + histogram_duration, counter_started, counter_completed, counter_failed = metrics + + async def inner_wrapper(self, request, grpc_context: grpc.aio.ServicerContext): + counter_started.inc() + try: + logger.debug('{:s} request: {:s}'.format(method_name, grpc_message_to_json_string(request))) + reply = await func(self, request, grpc_context) + logger.debug('{:s} reply: {:s}'.format(method_name, grpc_message_to_json_string(reply))) + counter_completed.inc() + return reply + except ServiceException as e: # pragma: no cover (ServiceException not thrown) + if e.code not in [grpc.StatusCode.NOT_FOUND, grpc.StatusCode.ALREADY_EXISTS]: + # Assume not found or already exists is just a condition, not an error + logger.exception('{:s} exception'.format(method_name)) + counter_failed.inc() + else: + counter_completed.inc() + await grpc_context.abort(e.code, e.details) + except Exception as e: # pragma: no cover, pylint: disable=broad-except + logger.exception('{:s} exception'.format(method_name)) + counter_failed.inc() + await grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) + + return inner_wrapper + + return outer_wrapper + diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py new file mode 100644 index 0000000000000000000000000000000000000000..fc43c315114e7b51c4e2604afbb14e165796e7c5 --- /dev/null +++ b/src/common/tools/kafka/Variables.py @@ -0,0 +1,92 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
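The `re.sub` added to `get_env_var_name` above matters because the new service names (e.g., `kpi-manager`, `analytics-frontend`) contain hyphens, which are invalid in environment variable names. A standalone sketch of the resulting behavior, assuming `ENVVAR_SUFIX_SERVICE_HOST` resolves to `'SERVICE_HOST'`:

```python
import re

def get_env_var_name(service_name_value: str, env_var_group: str) -> str:
    # Mirror of the updated helper above: non-alphanumerics become underscores.
    service_name = re.sub(r'[^a-zA-Z0-9]', '_', service_name_value)
    return ('{:s}SERVICE_{:s}'.format(service_name, env_var_group)).upper()

assert get_env_var_name('kpi-manager', 'SERVICE_HOST') == 'KPI_MANAGERSERVICE_SERVICE_HOST'
assert get_env_var_name('context',     'SERVICE_HOST') == 'CONTEXTSERVICE_SERVICE_HOST'
```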
+
+import logging
+from enum import Enum
+from confluent_kafka.admin import AdminClient, NewTopic
+from common.Settings import get_setting
+
+
+LOGGER = logging.getLogger(__name__)
+KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}'
+
+class KafkaConfig(Enum):
+
+    @staticmethod
+    def get_kafka_address() -> str:
+        # kafka_server_address = get_setting('KFK_SERVER_ADDRESS', default=None)
+        # if kafka_server_address is None:
+        KFK_NAMESPACE = get_setting('KFK_NAMESPACE')
+        KFK_PORT      = get_setting('KFK_SERVER_PORT')
+        kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT)
+        return kafka_server_address
+
+    @staticmethod
+    def get_admin_client():
+        SERVER_ADDRESS = KafkaConfig.get_kafka_address()
+        ADMIN_CLIENT   = AdminClient({'bootstrap.servers': SERVER_ADDRESS})
+        return ADMIN_CLIENT
+
+
+class KafkaTopic(Enum):
+    # TODO: Later to be populated from ENV variable.
+    REQUEST            = 'topic_request'
+    RESPONSE           = 'topic_response'
+    RAW                = 'topic_raw'
+    LABELED            = 'topic_labeled'
+    VALUE              = 'topic_value'
+    ANALYTICS_REQUEST  = 'topic_request_analytics'
+    ANALYTICS_RESPONSE = 'topic_response_analytics'
+
+    @staticmethod
+    def create_all_topics() -> bool:
+        """
+        Method to create Kafka topics defined as class members
+        """
+        all_topics = [member.value for member in KafkaTopic]
+        LOGGER.debug("Kafka server address is: {:} ".format(KafkaConfig.get_kafka_address()))
+        if KafkaTopic.create_new_topic_if_not_exists(all_topics):
+            LOGGER.debug("All topics were created successfully or already exist")
+            return True
+        else:
+            LOGGER.debug("Error creating all topics")
+            return False
+
+    @staticmethod
+    def create_new_topic_if_not_exists(new_topics: list) -> bool:
+        """
+        Method to create Kafka topics if they do not exist.
+        Args:
+            new_topics: list of topic name(s) to be created on Kafka
+        """
+        LOGGER.debug("Topic names to be verified and created: {:}".format(new_topics))
+        for topic in new_topics:
+            try:
+                topic_metadata = KafkaConfig.get_admin_client().list_topics(timeout=5)
+                # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics))
+                if topic not in topic_metadata.topics:
+                    # If the topic does not exist, create a new topic
+                    print("Topic {:} does not exist. Creating...".format(topic))
+                    LOGGER.debug("Topic {:} does not exist. Creating...".format(topic))
+                    new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
+                    KafkaConfig.get_admin_client().create_topics([new_topic])
+                else:
+                    print("Topic name already exists: {:}".format(topic))
+                    LOGGER.debug("Topic name already exists: {:}".format(topic))
+            except Exception as e:
+                LOGGER.debug("Failed to create topic: {:}".format(e))
+                return False
+        return True
+
+# create all topics after the deployments (Telemetry and Analytics)
diff --git a/src/common/tools/service/GenericGrpcServiceAsync.py b/src/common/tools/service/GenericGrpcServiceAsync.py
new file mode 100644
index 0000000000000000000000000000000000000000..488d861777ee7200fc4331449f21dded6b2f6dac
--- /dev/null
+++ b/src/common/tools/service/GenericGrpcServiceAsync.py
@@ -0,0 +1,72 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Union +import grpc +import logging +from concurrent import futures +from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH +from grpc_health.v1.health_pb2 import HealthCheckResponse +from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server +from common.Settings import get_grpc_bind_address, get_grpc_grace_period, get_grpc_max_workers + +class GenericGrpcServiceAsync: + def __init__( + self, bind_port: Union[str, int], bind_address: Optional[str] = None, max_workers: Optional[int] = None, + grace_period: Optional[int] = None, enable_health_servicer: bool = True, cls_name: str = __name__ + ) -> None: + self.logger = logging.getLogger(cls_name) + self.bind_port = bind_port + self.bind_address = get_grpc_bind_address() if bind_address is None else bind_address + self.max_workers = get_grpc_max_workers() if max_workers is None else max_workers + self.grace_period = get_grpc_grace_period() if grace_period is None else grace_period + self.enable_health_servicer = enable_health_servicer + self.endpoint = None + self.health_servicer = None + self.pool = None + self.server = None + + async def install_servicers(self): + pass + + async def start(self): + self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port)) + self.logger.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format( + str(self.endpoint), str(self.max_workers))) + + self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers) + self.server = grpc.aio.server(self.pool) + + await self.install_servicers() # Ensure this is awaited + + if self.enable_health_servicer: + self.health_servicer = HealthServicer( + experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1)) + add_HealthServicer_to_server(self.health_servicer, self.server) + + self.bind_port = self.server.add_insecure_port(self.endpoint) + self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port)) + self.logger.info('Listening on {:s}...'.format(str(self.endpoint))) + await self.server.start() + if self.enable_health_servicer: + self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) + + self.logger.debug('Service started') + + async def stop(self): + self.logger.debug('Stopping service (grace period {:s} seconds)...'.format(str(self.grace_period))) + if self.enable_health_servicer: + self.health_servicer.enter_graceful_shutdown() + await self.server.stop(self.grace_period) + self.logger.debug('Service stopped') diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py index 4f6b224748a61f23ea314e27552607ebfa45a05e..cf900ed6df3b4699a4e56f53873174ddd997cd53 100644 --- a/src/context/service/database/models/enums/DeviceDriver.py +++ b/src/context/service/database/models/enums/DeviceDriver.py @@ -33,7 +33,7 @@ class ORM_DeviceDriverEnum(enum.Enum): GNMI_OPENCONFIG = DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG OPTICAL_TFS = DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS IETF_ACTN = DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN - OC = 
DeviceDriverEnum.DEVICEDRIVER_OC, + OC = DeviceDriverEnum.DEVICEDRIVER_OC QKD = DeviceDriverEnum.DEVICEDRIVER_QKD grpc_to_enum__device_driver = functools.partial( diff --git a/src/device/requirements.in b/src/device/requirements.in index 73ea741d16dcdafd7a9be87ad79b457ccb6c5d5e..bf5e6a2b3128f438a7c044c3f3cf9ee393de2265 100644 --- a/src/device/requirements.in +++ b/src/device/requirements.in @@ -23,7 +23,8 @@ Flask==2.1.3 Flask-HTTPAuth==4.5.0 Flask-RESTful==0.3.9 Jinja2==3.0.3 -ncclient==0.6.13 +numpy<2.0.0 +ncclient==0.6.15 p4runtime==1.3.0 pandas==1.5.* paramiko==2.9.2 diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index cb6158b965a21c974605a27582340620f368bdb9..a5e7f377113342b98203a23a426540f6188f784e 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -178,3 +178,14 @@ if LOAD_ALL_DEVICE_DRIVERS: FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_OC, } ])) + +if LOAD_ALL_DEVICE_DRIVERS: + from .qkd.QKDDriver2 import QKDDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (QKDDriver, [ + { + # Close enough, it does optical switching + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.QKD_NODE, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_QKD, + } + ])) diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index a592b51576acc21e6dc055fe9f41e720f28aae1c..fd36e2dc40e38a125f1812f00eeb304106a40c8a 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import time import json import anytree, copy, logging, pytz, queue, re, threading #import lxml.etree as ET @@ -237,6 +238,8 @@ def edit_config( test_option=test_option, error_option=error_option, format=format) if commit_per_rule: netconf_handler.commit() # configuration commit + if 'table_connections' in resource_key: + time.sleep(5) # CPU usage might exceed critical level after route redistribution, BGP daemon needs time to reload #results[i] = True results.append(True) diff --git a/src/device/service/drivers/openconfig/templates/Acl.py b/src/device/service/drivers/openconfig/templates/Acl.py index cc3da6434fba0442fc11a33b4d8e380ad4e50bd8..e0e778f11415a2dcccd18f9b61166a68a7cf4fc2 100644 --- a/src/device/service/drivers/openconfig/templates/Acl.py +++ b/src/device/service/drivers/openconfig/templates/Acl.py @@ -20,7 +20,7 @@ from .Tools import add_value_from_tag LOGGER = logging.getLogger(__name__) XPATH_ACL_SET = "//ocacl:acl/ocacl:acl-sets/ocacl:acl-set" -XPATH_A_ACL_ENTRY = ".//ocacl:acl-entries/ocacl:ecl-entry" +XPATH_A_ACL_ENTRY = ".//ocacl:acl-entries/ocacl:acl-entry" XPATH_A_IPv4 = ".//ocacl:ipv4/ocacl:config" XPATH_A_TRANSPORT = ".//ocacl:transport/ocacl:config" XPATH_A_ACTIONS = ".//ocacl:actions/ocacl:config" @@ -34,29 +34,31 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: response = [] acl = {} + name = {} for xml_acl in xml_data.xpath(XPATH_ACL_SET, namespaces=NAMESPACES): #LOGGER.info('xml_acl = {:s}'.format(str(ET.tostring(xml_acl)))) acl_name = xml_acl.find('ocacl:name', namespaces=NAMESPACES) if acl_name is None or acl_name.text is None: continue - add_value_from_tag(acl, 'name', acl_name) + add_value_from_tag(name, 'name', acl_name) acl_type = xml_acl.find('ocacl:type', namespaces=NAMESPACES) 
add_value_from_tag(acl, 'type', acl_type) for xml_acl_entries in xml_acl.xpath(XPATH_A_ACL_ENTRY, namespaces=NAMESPACES): - acl_id = xml_acl_entries.find('ocacl:sequence_id', namespaces=NAMESPACES) - add_value_from_tag(acl, 'sequence_id', acl_id) + acl_id = xml_acl_entries.find('ocacl:sequence-id', namespaces=NAMESPACES) + add_value_from_tag(acl, 'sequence-id', acl_id) + LOGGER.info('xml_acl_id = {:s}'.format(str(ET.tostring(acl_id)))) for xml_ipv4 in xml_acl_entries.xpath(XPATH_A_IPv4, namespaces=NAMESPACES): - ipv4_source = xml_ipv4.find('ocacl:source_address', namespaces=NAMESPACES) - add_value_from_tag(acl, 'source_address' , ipv4_source) + ipv4_source = xml_ipv4.find('ocacl:source-address', namespaces=NAMESPACES) + add_value_from_tag(acl, 'source-address' , ipv4_source) - ipv4_destination = xml_ipv4.find('ocacl:destination_address', namespaces=NAMESPACES) - add_value_from_tag(acl, 'destination_address' , ipv4_destination) + ipv4_destination = xml_ipv4.find('ocacl:destination-address', namespaces=NAMESPACES) + add_value_from_tag(acl, 'destination-address' , ipv4_destination) ipv4_protocol = xml_ipv4.find('ocacl:protocol', namespaces=NAMESPACES) add_value_from_tag(acl, 'protocol' , ipv4_protocol) @@ -64,30 +66,30 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: ipv4_dscp = xml_ipv4.find('ocacl:dscp', namespaces=NAMESPACES) add_value_from_tag(acl, 'dscp' , ipv4_dscp) - ipv4_hop_limit = xml_ipv4.find('ocacl:hop_limit', namespaces=NAMESPACES) - add_value_from_tag(acl, 'hop_limit' , ipv4_hop_limit) + ipv4_hop_limit = xml_ipv4.find('ocacl:hop-limit', namespaces=NAMESPACES) + add_value_from_tag(acl, 'hop-limit' , ipv4_hop_limit) for xml_transport in xml_acl_entries.xpath(XPATH_A_TRANSPORT, namespaces=NAMESPACES): - transport_source = xml_transport.find('ocacl:source_port', namespaces=NAMESPACES) - add_value_from_tag(acl, 'source_port' ,transport_source) + transport_source = xml_transport.find('ocacl:source-port', namespaces=NAMESPACES) + add_value_from_tag(acl, 'source-port' ,transport_source) - transport_destination = xml_transport.find('ocacl:destination_port', namespaces=NAMESPACES) - add_value_from_tag(acl, 'destination_port' ,transport_destination) + transport_destination = xml_transport.find('ocacl:destination-port', namespaces=NAMESPACES) + add_value_from_tag(acl, 'destination-port' ,transport_destination) - transport_tcp_flags = xml_transport.find('ocacl:tcp_flags', namespaces=NAMESPACES) - add_value_from_tag(acl, 'tcp_flags' ,transport_tcp_flags) + transport_tcp_flags = xml_transport.find('ocacl:tcp-flags', namespaces=NAMESPACES) + add_value_from_tag(acl, 'tcp-flags' ,transport_tcp_flags) for xml_action in xml_acl_entries.xpath(XPATH_A_ACTIONS, namespaces=NAMESPACES): - action = xml_action.find('ocacl:forwarding_action', namespaces=NAMESPACES) - add_value_from_tag(acl, 'forwarding_action' ,action) + action = xml_action.find('ocacl:forwarding-action', namespaces=NAMESPACES) + add_value_from_tag(acl, 'forwarding-action' ,action) - log_action = xml_action.find('ocacl:log_action', namespaces=NAMESPACES) - add_value_from_tag(acl, 'log_action' ,log_action) + log_action = xml_action.find('ocacl:log-action', namespaces=NAMESPACES) + add_value_from_tag(acl, 'log-action' ,log_action) resource_key = '/acl/acl-set[{:s}][{:s}]/acl-entry[{:s}]'.format( - acl['name'], acl['type'], acl['sequence-id']) + name['name'], acl['type'], acl['sequence-id']) response.append((resource_key,acl)) for xml_interface in xml_data.xpath(XPATH_INTERFACE, namespaces=NAMESPACES): @@ -99,25 
+101,25 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: for xml_ingress in xml_interface.xpath(XPATH_I_INGRESS, namespaces=NAMESPACES): - i_name = xml_ingress.find('ocacl:set_name_ingress', namespaces=NAMESPACES) - add_value_from_tag(interface, 'ingress_set_name' , i_name) + i_name = xml_ingress.find('ocacl:set-name-ingress', namespaces=NAMESPACES) + add_value_from_tag(interface, 'ingress-set-name' , i_name) - i_type = xml_ingress.find('ocacl:type_ingress', namespaces=NAMESPACES) - add_value_from_tag(interface, 'ingress_type' , i_type) + i_type = xml_ingress.find('ocacl:type-ingress', namespaces=NAMESPACES) + add_value_from_tag(interface, 'ingress-type' , i_type) resource_key = '/acl/interfaces/ingress[{:s}][{:s}]'.format( - acl['name'], acl['type']) + name['name'], acl['type']) response.append((resource_key,interface)) for xml_egress in xml_interface.xpath(XPATH_I_EGRESS, namespaces=NAMESPACES): - e_name = xml_egress.find('ocacl:set_name_egress', namespaces=NAMESPACES) - add_value_from_tag(interface, 'egress_set_name' , e_name) + e_name = xml_egress.find('ocacl:set-name-egress', namespaces=NAMESPACES) + add_value_from_tag(interface, 'egress-set-name' , e_name) - e_type = xml_egress.find('ocacl:type_egress', namespaces=NAMESPACES) - add_value_from_tag(interface, 'egress_type' , e_type) + e_type = xml_egress.find('ocacl:type-egress', namespaces=NAMESPACES) + add_value_from_tag(interface, 'egress-type' , e_type) resource_key = '/acl/interfaces/egress[{:s}][{:s}]'.format( - acl['name'], acl['type']) + name['name'], acl['type']) response.append((resource_key,interface)) return response diff --git a/src/device/service/drivers/openconfig/templates/Inventory.py b/src/device/service/drivers/openconfig/templates/Inventory.py index 9897f04f9df2dd6c1ce4010d9ad9878ae0d04242..4fca35bc39ea4a72a864f16832f944ed7ad21621 100644 --- a/src/device/service/drivers/openconfig/templates/Inventory.py +++ b/src/device/service/drivers/openconfig/templates/Inventory.py @@ -75,6 +75,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: component_location = xml_component.find('ocp:state/ocp:location', namespaces=NAMESPACES) if not component_location is None: add_value_from_tag(inventory['attributes'], 'location', component_location) + + component_id = xml_component.find('ocp:state/ocp:id', namespaces=NAMESPACES) + if not component_id is None: + add_value_from_tag(inventory['attributes'], 'id', component_id) component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES) if component_type is not None: @@ -109,7 +113,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: component_mfg_name = xml_component.find('ocp:state/ocp:mfg-name', namespaces=NAMESPACES) if not component_mfg_name is None: - add_value_from_tag(inventory['attributes'], 'manufacturer-name', component_mfg_name) + add_value_from_tag(inventory['attributes'], 'mfg-name', component_mfg_name) component_removable = xml_component.find('ocp:state/ocp:removable', namespaces=NAMESPACES) if not component_removable is None: diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py index 7bed281812c4097124f4794a7d6232993b125957..97b55c817fdf002fe5f09852b203e5b14c600b06 100644 --- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py +++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py @@ -23,6 +23,8 @@ XPATH_NETWORK_INSTANCES = 
"//ocni:network-instances/ocni:network-instance" XPATH_NI_PROTOCOLS = ".//ocni:protocols/ocni:protocol" XPATH_NI_TABLE_CONNECTS = ".//ocni:table-connections/ocni:table-connection" +XPATH_NI_INTERFACE = ".//ocni:interfaces/ocni:interface" + XPATH_NI_IIP_AP = ".//ocni:inter-instance-policies/ocni:apply-policy" XPATH_NI_IIP_AP_IMPORT = ".//ocni:config/ocni:import-policy" XPATH_NI_IIP_AP_EXPORT = ".//ocni:config/ocni:export-policy" @@ -136,6 +138,21 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: table_connection['address_family']) response.append((resource_key, table_connection)) + for xml_interface in xml_network_instance.xpath(XPATH_NI_INTERFACE, namespaces=NAMESPACES): + LOGGER.info('xml_interfaces = {:s}'.format(str(ET.tostring(xml_interface)))) + + interface = {} + name_iface = xml_interface.find('ocni:config/ocni:interface', namespaces=NAMESPACES) + if name_iface is None or name_iface.text is None: continue + add_value_from_tag(interface, 'name_iface', name_iface) + + name_subiface = xml_interface.find('ocni:config/ocni:subinterface', namespaces=NAMESPACES) + add_value_from_tag(interface, 'name_subiface', name_subiface) + + resource_key = '/network_instance[{:s}]/interface[{:s}]'.format( + network_instance['name'], interface['name_iface']) + response.append((resource_key, interface)) + for xml_iip_ap in xml_network_instance.xpath(XPATH_NI_IIP_AP, namespaces=NAMESPACES): #LOGGER.info('xml_iip_ap = {:s}'.format(str(ET.tostring(xml_iip_ap)))) diff --git a/src/device/service/drivers/openconfig/templates/Tools.py b/src/device/service/drivers/openconfig/templates/Tools.py index b7a5ba9c1a962032fe13b4ec5cb70eae7ff604a1..c4ef22b1e3c11f1e512026bea8e2122ab703a9e5 100644 --- a/src/device/service/drivers/openconfig/templates/Tools.py +++ b/src/device/service/drivers/openconfig/templates/Tools.py @@ -61,7 +61,8 @@ def generate_templates(resource_key: str, resource_value: str, delete: bool,vend elif "inter_instance_policies" in resource_key: result_templates.append(associate_RP_to_NI(data)) elif "protocols" in resource_key: - if vendor == "ADVA": result_templates.append(add_protocol_NI(data, vendor, delete)) + if vendor is None or vendor == "ADVA": + result_templates.append(add_protocol_NI(data, vendor, delete)) elif "table_connections" in resource_key: result_templates.append(create_table_conns(data, delete)) elif "interface" in resource_key: diff --git a/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py b/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py index 09e3b618a69c738b57b4d2268c0429e6f8119147..ab57ce3bd26e9183f931a1a6e13a44a9a85bef7d 100644 --- a/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py +++ b/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py @@ -54,7 +54,7 @@ def create_If_SubIf(data,vendor, DEL): with tag('enabled'):text('true') with tag('subinterfaces'): with tag('subinterface'): - if vendor == 'ADVA': + if vendor is None or vendor == 'ADVA': with tag('index'): text('0') with tag('config'): with tag('index'): text('0') @@ -65,8 +65,10 @@ def create_If_SubIf(data,vendor, DEL): with tag('single-tagged'): with tag('config'): with tag('vlan-id'):text(data['vlan_id']) - if "l3ipvlan" in data['type']: + if "l3ipvlan" in data['type'] and 'address_ip' in data: with tag('ipv4', xmlns="http://openconfig.net/yang/interfaces/ip"): + if 'mtu' in data: + with tag('mtu'):text(data['mtu']) with tag('addresses'): with tag('address'): with 
tag('ip'):text(data['address_ip']) diff --git a/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py b/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py index c2d18ef172bccaf46b4e323a1fef6ef048232888..157dd0ab89a0eb625d428dd95109faabc399bcf0 100644 --- a/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py +++ b/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py @@ -64,10 +64,12 @@ def create_NI(parameters,vendor,DEL): elif "L3VRF" in parameters['type']: with tag('config'): with tag('name'):text(parameters['name']) - if vendor == "ADVA": + if "router_id" in parameters: + with tag('router-id'):text(parameters['router_id']) + if vendor is None or vendor == 'ADVA': with tag('type', 'xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types"'):text('oc-ni-types:',parameters['type']) with tag('route-distinguisher'):text(parameters['route_distinguisher']) - if vendor == "ADVA": + if vendor is None or vendor == 'ADVA': with tag('encapsulation'): with tag('config'): with tag('encapsulation-type', 'xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types"') :text('oc-ni-types:MPLS') @@ -123,14 +125,29 @@ def add_protocol_NI(parameters,vendor, DEL): with tag('config'): with tag('identifier', 'xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):text('oc-pol-types:',parameters['identifier']) with tag('name') :text(parameters['protocol_name']) + with tag('enabled'): text('true') if "BGP" in parameters['identifier']: with tag('bgp'): + with tag('name'): text(parameters['as']) with tag('global'): with tag('config'): with tag('as') :text(parameters['as']) - if "router-id" in parameters: - with tag('router-id'):text(parameters['router-id']) - if vendor == "ADVA": + if "router_id" in parameters: + with tag('router-id'):text(parameters['router_id']) + if 'neighbors' in parameters: + with tag('neighbors'): + for neighbor in parameters['neighbors']: + with tag('neighbor'): + with tag('neighbor-address'): text(neighbor['ip_address']) + with tag('afi-safis'): + with tag('afi-safi', 'xmlns:oc-bgp-types="http://openconfig.net/yang/bgp-types"'): + with tag('afi-safi-name'): text('oc-bgp-types:IPV4_UNICAST') + with tag('enabled'): text('true') + with tag('config'): + with tag('neighbor-address'): text(neighbor['ip_address']) + with tag('enabled'): text('true') + with tag('peer-as'): text(parameters['as']) + if vendor is None or vendor == 'ADVA': with tag('tables'): with tag('table'): with tag('protocol', 'xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):text('oc-pol-types:',parameters['identifier']) @@ -177,6 +194,9 @@ def associate_If_to_NI(parameters, DEL): else: with tag('network-instance'): with tag('name'):text(parameters['name']) + with tag('config'): + with tag('name'):text(parameters['name']) + with tag('type', 'xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types"'):text('oc-ni-types:',parameters['type']) with tag('interfaces'): with tag('interface'): with tag('id'):text(parameters['id']) @@ -315,7 +335,7 @@ def create_table_conns(parameters,DEL): with tag('table-connection','xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete"'): with tag('src-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'): text('oc-pol-types:',parameters['src_protocol']) with tag('dst-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'): 
text('oc-pol-types:',parameters['dst_protocol']) - with tag('address-family', 'xmlns:oc-types="http://openconfig.net/yang/openconfig-types"'):text('oc-types:',parameters['dst_protocol']) + with tag('address-family', 'xmlns:oc-types="http://openconfig.net/yang/openconfig-types"'):text('oc-types:',parameters['address_family']) else: with tag('table-connections'): with tag('table-connection'): @@ -326,6 +346,8 @@ def create_table_conns(parameters,DEL): with tag('src-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'): text('oc-pol-types:',parameters['src_protocol']) with tag('dst-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'): text('oc-pol-types:',parameters['dst_protocol']) with tag('address-family', 'xmlns:oc-types="http://openconfig.net/yang/openconfig-types"'):text('oc-types:',parameters['address_family']) + # for OCNOS: check if needed + #with tag('dst-instance', 'xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-oc-ni-augments"'):text('65000') if len(parameters['default_import_policy']) != 0: with tag('default-import-policy'):text(parameters['default_import_policy']) result = indent( diff --git a/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py b/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py index 5cc8cc71de9a952eecc8b3df2d71b6d38c496eb9..69fdd2cc52ec179665b6fc5a766b04b0e6c2a6ae 100644 --- a/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py +++ b/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py @@ -133,14 +133,14 @@ data_2 = {'ext_community_member' : '65001:101', 'ext_community_set_name' : 'set_srv_101_a'} print('\nRouting Policy Statement - CREATE\n') -print(rp_statement(data_1, False)) +print(create_rp_statement(data_1, False)) print('\nRouting Policy Statement - DELETE\n') -print(rp_statement(data_1, True)) +print(create_rp_statement(data_1, True)) print('\nRouting Policy Defined Set - CREATE\n') -print(rp_defined_set(data_2, False)) +print(create_rp_def(data_2, False)) print('\nRouting Policy Defined Set - DELETE\n') -print(rp_defined_set(data_2, True)) +print(create_rp_def(data_2, True)) ''' ''' diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py index 0c1a057e7c07bebb0e41e295e8d44082bc3ef236..a209d9607c8dca0b5ce09b7b98592a7cdb9b9aaf 100644 --- a/src/device/service/drivers/openconfig/templates/__init__.py +++ b/src/device/service/drivers/openconfig/templates/__init__.py @@ -27,6 +27,9 @@ from .NetworkInstances import parse as parse_network_instances from .RoutingPolicy import parse as parse_routing_policy from .Acl import parse as parse_acl from .Inventory import parse as parse_inventory +from .acl.acl_adapter import acl_cr_to_dict +from .acl.acl_adapter_ipinfusion_proprietary import acl_cr_to_dict_ipinfusion_proprietary + LOGGER = logging.getLogger(__name__) ALL_RESOURCE_KEYS = [ @@ -112,16 +115,34 @@ def compose_config( # template generation ] elif (message_renderer == "jinja"): - templates =[] - template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key)) - templates.append(JINJA_ENV.get_template(template_name)) - + templates = [] if "acl_ruleset" in resource_key: # MANAGING ACLs - templates =[] - templates.append(JINJA_ENV.get_template('acl/acl-set/acl-entry/edit_config.xml')) - templates.append(JINJA_ENV.get_template('acl/interfaces/ingress/edit_config.xml')) - data : Dict[str, Any] = json.loads(resource_value) - operation = 'delete' if 
delete else 'merge'
+            if vendor == 'ipinfusion': # ipinfusion proprietary netconf recipe is used temporarily
+                enable_ingress_filter_path = 'acl/interfaces/ingress/enable_ingress_filter.xml'
+                acl_entry_path             = 'acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml'
+                acl_ingress_path           = 'acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml'
+                data : Dict[str, Any] = acl_cr_to_dict_ipinfusion_proprietary(resource_value, delete=delete)
+            else:
+                enable_ingress_filter_path = 'acl/interfaces/ingress/enable_ingress_filter.xml'
+                acl_entry_path             = 'acl/acl-set/acl-entry/edit_config.xml'
+                acl_ingress_path           = 'acl/interfaces/ingress/edit_config.xml'
+                data : Dict[str, Any] = acl_cr_to_dict(resource_value, delete=delete)
+
+            if delete: # unpair acl and interface before removing acl
+                templates.append(JINJA_ENV.get_template(acl_ingress_path))
+                templates.append(JINJA_ENV.get_template(acl_entry_path))
+                templates.append(JINJA_ENV.get_template(enable_ingress_filter_path))
+            else:
+                templates.append(JINJA_ENV.get_template(enable_ingress_filter_path))
+                templates.append(JINJA_ENV.get_template(acl_entry_path))
+                templates.append(JINJA_ENV.get_template(acl_ingress_path))
+        else:
+            template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key))
+            templates.append(JINJA_ENV.get_template(template_name))
+            data : Dict[str, Any] = json.loads(resource_value)
+
+        operation = 'delete' if delete else 'merge' # others
+        #operation = 'delete' if delete else '' # ipinfusion?

     return [
         '<config>{:s}</config>'.format(
diff --git a/src/device/service/drivers/openconfig/templates/acl/__init__.py b/src/device/service/drivers/openconfig/templates/acl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..839e45e3b646bc60de7edd81fcfb91b7b38feadf
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
\ No newline at end of file diff --git a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml new file mode 100644 index 0000000000000000000000000000000000000000..d0210a66c1b5d7de1a4be479cd79e9b48131e2a0 --- /dev/null +++ b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml @@ -0,0 +1,34 @@ +<acl xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-acl"> + <acl-sets> + <acl-set {% if operation == 'delete' %}operation="delete"{% endif %}> + <name>{{name}}</name> + {% if type is defined %}<type>{{type}}</type>{% endif %} + <config> + <name>{{name}}</name> + {% if type is defined %}<type>{{type}}</type>{% endif %} + </config> + {% if operation != 'delete' %} + <acl-entries> + <acl-entry> + <sequence-id>{{sequence_id}}</sequence-id> + <config> + <sequence-id>{{sequence_id}}</sequence-id> + </config> + <ipv4> + <config> + <source-address>{{source_address}}</source-address> + <destination-address>{{destination_address}}</destination-address> + <dscp>{{dscp}}</dscp> + <protocol-tcp /> + <tcp-source-port>{{source_port}}</tcp-source-port> + <tcp-destination-port>{{destination_port}}</tcp-destination-port> + <tcp-flags>{{tcp_flags}}</tcp-flags> + <forwarding-action>{{forwarding_action}}</forwarding-action> + </config> + </ipv4> + </acl-entry> + </acl-entries> + {% endif %} + </acl-set> + </acl-sets> +</acl> \ No newline at end of file diff --git a/src/device/service/drivers/openconfig/templates/acl/acl_adapter.py b/src/device/service/drivers/openconfig/templates/acl/acl_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..15e723680c355d58b84d0a1677be3f21a0fb95ed --- /dev/null +++ b/src/device/service/drivers/openconfig/templates/acl/acl_adapter.py @@ -0,0 +1,73 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
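The proprietary ACL template above can be rendered outside the driver for quick inspection. A minimal sketch using plain Jinja2; the loader path and sample values are illustrative, and the data keys match the adapter dictionaries defined in this directory:

```python
from jinja2 import Environment, FileSystemLoader

# Render the ipinfusion acl-entry template standalone (path relative to templates/).
env = Environment(loader=FileSystemLoader('src/device/service/drivers/openconfig/templates'))
template = env.get_template('acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml')
print(template.render(
    operation='merge', name='sample-acl', type='ip', sequence_id=10,
    source_address='172.16.0.0/24', destination_address='172.0.0.0/8',
    dscp=18, source_port=22, destination_port=80,
    tcp_flags='TCP_SYN', forwarding_action='permit'))
```

Rendering with `operation='delete'` exercises the other branch of the template, which emits only the `acl-set` skeleton tagged for removal.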
+
+from typing import Dict, TypedDict
+
+from ..ACL.ACL_multivendor import RULE_TYPE_MAPPING, FORWARDING_ACTION_MAPPING, LOG_ACTION_MAPPING
+
+# total=False: some keys (e.g. subinterface) are intentionally left out when building the dict
+class ACLRequestData(TypedDict, total=False):
+    name: str                # acl-set name
+    type: str                # acl-set type
+    sequence_id: int         # acl-entry sequence-id
+    source_address: str
+    destination_address: str
+    forwarding_action: str
+    id: str                  # interface id
+    interface: str
+    subinterface: int
+    set_name_ingress: str    # ingress-acl-set name
+    type_ingress: str        # ingress-acl-set type
+    all: bool
+    dscp: int
+    protocol: int
+    tcp_flags: str
+    source_port: int
+    destination_port: int
+
+# `delete` is accepted (and currently unused) so call sites can invoke this adapter and the
+# ipinfusion-proprietary one with the same keyword arguments (see compose_config above).
+def acl_cr_to_dict(acl_cr_dict: Dict, delete: bool = False, subinterface: int = 0) -> Dict:
+    rule_set              = acl_cr_dict['rule_set']
+    rule_set_entry        = rule_set['entries'][0]
+    rule_set_entry_match  = rule_set_entry['match']
+    rule_set_entry_action = rule_set_entry['action']
+
+    name: str = rule_set['name']
+    type: str = RULE_TYPE_MAPPING[rule_set["type"]]
+    sequence_id = rule_set_entry['sequence_id']
+    source_address = rule_set_entry_match['src_address']
+    destination_address = rule_set_entry_match['dst_address']
+    forwarding_action: str = FORWARDING_ACTION_MAPPING[rule_set_entry_action['forward_action']]
+    interface_id = acl_cr_dict['interface']
+    interface = interface_id
+    set_name_ingress = name
+    type_ingress = type
+
+    return ACLRequestData(
+        name=name,
+        type=type,
+        sequence_id=sequence_id,
+        source_address=source_address,
+        destination_address=destination_address,
+        forwarding_action=forwarding_action,
+        id=interface_id,
+        interface=interface,
+        # subinterface=subinterface,
+        set_name_ingress=set_name_ingress,
+        type_ingress=type_ingress,
+        all=True,
+        dscp=18,
+        protocol=6,
+        tcp_flags='TCP_SYN',
+        source_port=22,
+        destination_port=80
+    )
diff --git a/src/device/service/drivers/openconfig/templates/acl/acl_adapter_ipinfusion_proprietary.py b/src/device/service/drivers/openconfig/templates/acl/acl_adapter_ipinfusion_proprietary.py
new file mode 100644
index 0000000000000000000000000000000000000000..52213c2aba9a128ace4e927a5f01f9be278442b6
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/acl_adapter_ipinfusion_proprietary.py
@@ -0,0 +1,63 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
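A usage sketch for the `acl_cr_to_dict` adapter above. The payload shape follows the keys the adapter reads; the rule-type and action constants are assumed to match the mappings in the ipinfusion variant below, and the import path is illustrative (inside the driver the adapter is imported relatively):

```python
from device.service.drivers.openconfig.templates.acl.acl_adapter import acl_cr_to_dict

# Hypothetical ACL config-rule payload; key names follow what the adapter reads.
sample_acl = {
    'interface': 'eth0',
    'rule_set': {
        'name'   : 'sample-acl',
        'type'   : 'ACLRULETYPE_IPV4',
        'entries': [{
            'sequence_id': 10,
            'match'      : {'src_address': '172.16.0.0/24', 'dst_address': '172.0.0.0/8'},
            'action'     : {'forward_action': 'ACLFORWARDINGACTION_ACCEPT'},
        }],
    },
}

data = acl_cr_to_dict(sample_acl)
print(data['name'], data['forwarding_action'])
```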
+
+from typing import Dict, TypedDict
+
+
+RULE_TYPE_MAPPING = {
+    'ACLRULETYPE_IPV4' : 'ip',
+}
+
+FORWARDING_ACTION_MAPPING = {
+    'ACLFORWARDINGACTION_DROP'   : 'deny',
+    'ACLFORWARDINGACTION_ACCEPT' : 'permit',
+}
+
+# total=False: the delete path below only fills name, type and interface
+class ACLRequestData(TypedDict, total=False):
+    name: str                # acl-set name
+    type: str                # acl-set type
+    sequence_id: int         # acl-entry sequence-id
+    source_address: str
+    destination_address: str
+    forwarding_action: str
+    interface: str
+    dscp: int
+    tcp_flags: str
+    source_port: int
+    destination_port: int
+
+def acl_cr_to_dict_ipinfusion_proprietary(acl_cr_dict: Dict, delete: bool = False) -> Dict:
+    rule_set = acl_cr_dict['rule_set']
+    name: str = rule_set['name']
+    type: str = RULE_TYPE_MAPPING[rule_set["type"]]
+    interface = acl_cr_dict['interface'][5:]  # remove preceding `PORT-` characters
+    if delete:
+        return ACLRequestData(name=name, type=type, interface=interface)
+    rule_set_entry = rule_set['entries'][0]
+    rule_set_entry_match = rule_set_entry['match']
+    rule_set_entry_action = rule_set_entry['action']
+
+    return ACLRequestData(
+        name=name,
+        type=type,
+        sequence_id=rule_set_entry['sequence_id'],
+        source_address=rule_set_entry_match['src_address'],
+        destination_address=rule_set_entry_match['dst_address'],
+        forwarding_action=FORWARDING_ACTION_MAPPING[rule_set_entry_action['forward_action']],
+        interface=interface,
+        dscp=rule_set_entry_match["dscp"],
+        tcp_flags=rule_set_entry_match["flags"],
+        source_port=rule_set_entry_match['src_port'],
+        destination_port=rule_set_entry_match['dst_port']
+    )
diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml
new file mode 100644
index 0000000000000000000000000000000000000000..6e502154f16a7a9d4ce0afc0c49ab96b3a2bd979
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml
@@ -0,0 +1,26 @@
+<acl xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-acl">
+  <interfaces>
+    <interface>
+      <name>{{interface}}</name>
+      <config>
+        <name>{{interface}}</name>
+      </config>
+      <ingress-acl-sets>
+        <ingress-acl-set {% if operation == "delete" %}operation="delete"{% endif %}>
+          {% if type is defined %}<acl-type>{{type}}</acl-type>{% endif %}
+          <access-groups>
+            <access-group>
+              <acl-name>{{name}}</acl-name>
+              <config>
+                <acl-name>{{name}}</acl-name>
+              </config>
+            </access-group>
+          </access-groups>
+          <config>
+            {% if type is defined %}<acl-type>{{type}}</acl-type>{% endif %}
+          </config>
+        </ingress-acl-set>
+      </ingress-acl-sets>
+    </interface>
+  </interfaces>
+</acl>
\ No newline at end of file
diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/enable_ingress_filter.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/enable_ingress_filter.xml
new file mode 100644
index 0000000000000000000000000000000000000000..274028657547dd31d20654e2a59ac11554cb01d5
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/enable_ingress_filter.xml
@@ -0,0 +1,9 @@
+<profiles xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-platform">
+  <hardware-profile>
+    <filters>
+      <config>
+        <ingress-ipv4-extended {% if operation == "delete" %}operation="delete"{% endif %}></ingress-ipv4-extended>
+      </config>
+    </filters>
+  </hardware-profile>
+</profiles>
\ No newline at end of file
diff --git 
a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml index e441004006e4cdd445f1d0244a9582b57956af40..2d8d3ee07b3a8df20a4b51be755e18b7aec982de 100644 --- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml @@ -1,5 +1,4 @@ -<interfaces xmlns="http://openconfig.net/yang/interfaces" - xmlns:oc-ip="http://openconfig.net/yang/interfaces/ip" > +<interfaces xmlns="http://openconfig.net/yang/interfaces"> <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> <name>{{name}}</name> {% if operation is defined and operation != 'delete' %} @@ -31,17 +30,20 @@ </vlan> {% endif %} {% if address_ip is defined %} - <oc-ip:ipv4> - <oc-ip:addresses> - <oc-ip:address> - <oc-ip:ip>{{address_ip}}</oc-ip:ip> - <oc-ip:config> - <oc-ip:ip>{{address_ip}}</oc-ip:ip> - <oc-ip:prefix-length>{{address_prefix}}</oc-ip:prefix-length> - </oc-ip:config> - </oc-ip:address> - </oc-ip:addresses> - </oc-ip:ipv4> + <ipv4 xmlns="http://openconfig.net/yang/interfaces/ip"> + <config> + {% if mtu is defined %}<mtu>{{mtu}}</mtu>{% endif%} + </config> + <addresses> + <address> + <ip>{{address_ip}}</ip> + <config> + <ip>{{address_ip}}</ip> + <prefix-length>{{address_prefix}}</prefix-length> + </config> + </address> + </addresses> + </ipv4> {% endif %} </subinterface> </subinterfaces> diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml index 855f321b4a69ba1e660487c108a05d0ec4b5d475..e926796d039d54e30f6ba13eb5eb66bcec079c08 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml @@ -1,6 +1,10 @@ <network-instances xmlns="http://openconfig.net/yang/network-instance"> <network-instance> <name>{{name}}</name> + <config> + <name>{{name}}</name> + <type xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types">oc-ni-types:{{type}}</type> + </config> <interfaces> <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> <id>{{id}}</id> diff --git a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml index c9c068e480c0569cfe5f97b78b28fbe03e2595f8..da66d97f053f509a1a595cdb1abc0bd1791ad0bc 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml @@ -9,15 +9,37 @@ <config> <identifier xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</identifier> <name>{{protocol_name}}</name> + <enabled>true</enabled> </config> {% if identifier=='BGP' %} <bgp> + <name>{{as}}</name> <global> <config> <as>{{as}}</as> <router-id>{{router_id}}</router-id> </config> </global> + {% if neighbors is defined %} + <neighbors> + {% for neighbor in neighbors %} + <neighbor> + <neighbor-address>{{neighbor['ip_address']}}</neighbor-address> + <afi-safis> + <afi-safi 
xmlns:oc-bgp-types="http://openconfig.net/yang/bgp-types"> + <afi-safi-name>oc-bgp-types:IPV4_UNICAST</afi-safi-name> + <enabled>true</enabled> + </afi-safi> + </afi-safis> + <config> + <neighbor-address>{{neighbor['ip_address']}}</neighbor-address> + <enabled>true</enabled> + <peer-as>{{as}}</peer-as> + </config> + </neighbor> + {% endfor %} + </neighbors> + {% endif %} </bgp> {% endif %} {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml index 46bf5e387789c7efc800ad96ed759748273bed34..35c535c6bd3f78e30fc2177ecc722b1115f54fc5 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml @@ -11,6 +11,9 @@ <src-protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{src_protocol}}</src-protocol> <dst-protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{dst_protocol}}</dst-protocol> <address-family xmlns:oc-types="http://openconfig.net/yang/openconfig-types">oc-types:{{address_family}}</address-family> + {% if False %} + <dst-instance xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-oc-ni-augments">{{as}}</dst-instance> + {% endif %} {% if default_import_policy is defined %}<default-import-policy>{{default_import_policy}}</default-import-policy>{% endif %} </config> {% endif %} diff --git a/src/device/service/drivers/qkd/QKDDriver.py b/src/device/service/drivers/qkd/QKDDriver.py new file mode 100644 index 0000000000000000000000000000000000000000..a49144d6fc0840498c5f5f1267d1cef25cb1177a --- /dev/null +++ b/src/device/service/drivers/qkd/QKDDriver.py @@ -0,0 +1,168 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, requests, threading +from requests.auth import HTTPBasicAuth +from typing import Any, Iterator, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.type_checkers.Checkers import chk_string, chk_type +from device.service.driver_api._Driver import _Driver +from . 
import ALL_RESOURCE_KEYS +from .Tools import find_key, config_getter, create_connectivity_link + +LOGGER = logging.getLogger(__name__) + +DRIVER_NAME = 'qkd' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) + + +class QKDDriver(_Driver): + def __init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) + self.__lock = threading.Lock() + self.__started = threading.Event() + self.__terminate = threading.Event() + username = self.settings.get('username') + password = self.settings.get('password') + self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None + scheme = self.settings.get('scheme', 'http') + self.__qkd_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port)) + self.__timeout = int(self.settings.get('timeout', 120)) + self.__node_ids = set(self.settings.get('node_ids', [])) + token = self.settings.get('token') + self.__headers = {'Authorization': 'Bearer ' + token} + self.__initial_data = None + + def Connect(self) -> bool: + url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node' + with self.__lock: + if self.__started.is_set(): return True + r = None + try: + LOGGER.info(f'requests.get("{url}", timeout={self.__timeout}, verify=False, auth={self.__auth}, headers={self.__headers})') + r = requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth, headers=self.__headers) + LOGGER.info(f'R: {r}') + LOGGER.info(f'Text: {r.text}') + LOGGER.info(f'Json: {r.json()}') + except requests.exceptions.Timeout: + LOGGER.exception('Timeout connecting {:s}'.format(str(self.__qkd_root))) + return False + except Exception: # pylint: disable=broad-except + LOGGER.exception('Exception connecting {:s}'.format(str(self.__qkd_root))) + return False + else: + self.__started.set() + self.__initial_data = r.json() + return True + + def Disconnect(self) -> bool: + with self.__lock: + self.__terminate.set() + return True + + @metered_subclass_method(METRICS_POOL) + def GetInitialConfig(self) -> List[Tuple[str, Any]]: + with self.__lock: + return self.__initial_data + + @metered_subclass_method(METRICS_POOL) + def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: + chk_type('resources', resource_keys, list) + results = [] + with self.__lock: + if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS + for i, resource_key in enumerate(resource_keys): + str_resource_name = 'resource_key[#{:d}]'.format(i) + chk_string(str_resource_name, resource_key, allow_empty=False) + results.extend(config_getter( + self.__qkd_root, resource_key, timeout=self.__timeout, auth=self.__auth, + node_ids=self.__node_ids, headers=self.__headers)) + return results + + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + results = [] + if len(resources) == 0: + return results + with self.__lock: + for resource_key, resource_value in resources: + LOGGER.info('resource = {:s}'.format(str(resource_key))) + + if resource_key.startswith('/link'): + try: + resource_value = json.loads(resource_value) + link_uuid = resource_value['uuid'] + + node_id_src = resource_value['src_qkdn_id'] + interface_id_src = resource_value['src_interface_id'] + node_id_dst = resource_value['dst_qkdn_id'] + interface_id_dst = resource_value['dst_interface_id'] + virt_prev_hop = resource_value.get('virt_prev_hop') + virt_next_hops = 
resource_value.get('virt_next_hops') + virt_bandwidth = resource_value.get('virt_bandwidth') + + + data = create_connectivity_link( + self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst, + virt_prev_hop, virt_next_hops, virt_bandwidth, + timeout=self.__timeout, auth=self.__auth, headers=self.__headers + ) + + #data = create_connectivity_link( + # self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst, + # timeout=self.__timeout, auth=self.__auth + #) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unhandled error processing resource_key({:s})'.format(str(resource_key))) + results.append(e) + else: + results.append(True) + + LOGGER.info('Test keys: ' + str([x for x,y in resources])) + LOGGER.info('Test values: ' + str(results)) + return results + + ''' + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + results = [] + if len(resources) == 0: return results + with self.__lock: + for resource in resources: + LOGGER.info('resource = {:s}'.format(str(resource))) + uuid = find_key(resource, 'uuid') + results.extend(delete_connectivity_service( + self.__qkd_root, uuid, timeout=self.__timeout, auth=self.__auth)) + return results + ''' + + @metered_subclass_method(METRICS_POOL) + def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + # TODO: QKD API Driver does not support monitoring by now + LOGGER.info(f'Subscribe {self.address}: {subscriptions}') + return [True for _ in subscriptions] + + @metered_subclass_method(METRICS_POOL) + def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + # TODO: QKD API Driver does not support monitoring by now + return [False for _ in subscriptions] + + def GetState( + self, blocking=False, terminate : Optional[threading.Event] = None + ) -> Iterator[Tuple[float, str, Any]]: + # TODO: QKD API Driver does not support monitoring by now + LOGGER.info(f'GetState {self.address} called') + return [] diff --git a/src/device/service/drivers/qkd/QKDDriver2.py b/src/device/service/drivers/qkd/QKDDriver2.py new file mode 100644 index 0000000000000000000000000000000000000000..c73a83141d92955d01a6a00912389b671fe7ef98 --- /dev/null +++ b/src/device/service/drivers/qkd/QKDDriver2.py @@ -0,0 +1,216 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
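+
+# Illustrative usage sketch (not part of the driver; the address, port and
+# token below are placeholder values): the driver is pointed at the node's
+# RESTCONF endpoint, Connect() fetches the etsi-qkd-sdn-node tree, and
+# GetConfig() maps resource keys (see qkd/__init__.py) onto its subtrees.
+#
+#     driver = QKDDriver(address='10.0.2.10', port=11111,
+#                        headers={'Authorization': 'Bearer <token>'})
+#     if driver.Connect():
+#         links = driver.GetConfig(['__links__'])   # RESOURCE_LINKS
+#         driver.Disconnect()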
+
+import os
+import json
+import logging
+import requests
+import threading
+from requests.auth import HTTPBasicAuth
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.type_checkers.Checkers import chk_string, chk_type
+from device.service.driver_api._Driver import _Driver
+from .Tools2 import config_getter, create_connectivity_link
+from . import ALL_RESOURCE_KEYS
+
+LOGGER = logging.getLogger(__name__)
+
+DRIVER_NAME = 'qkd'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
+
+
+class QKDDriver(_Driver):
+    def __init__(self, address: str, port: int, **settings) -> None:
+        LOGGER.info(f"Initializing QKDDriver with address={address}, port={port}, settings={settings}")
+        super().__init__(DRIVER_NAME, address, port, **settings)
+        self.__lock = threading.Lock()
+        self.__started = threading.Event()
+        self.__terminate = threading.Event()
+        self.__qkd_root = os.getenv('QKD_API_URL', f"http://{self.address}:{self.port}")  # Simplified URL management
+        self.__timeout = int(self.settings.get('timeout', 120))
+        self.__node_ids = set(self.settings.get('node_ids', []))
+        self.__initial_data = None
+
+        # Optionally pass headers (e.g., JWT) and auth for authentication
+        self.__headers = settings.get('headers', {})
+        self.__auth = settings.get('auth', None)
+
+        LOGGER.info(f"QKDDriver initialized with QKD root URL: {self.__qkd_root}")
+
+    def Connect(self) -> bool:
+        url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node'
+        with self.__lock:
+            LOGGER.info(f"Starting connection to {url}")
+            if self.__started.is_set():
+                LOGGER.info("Already connected, skipping re-connection.")
+                return True
+
+            try:
+                LOGGER.info(f'Attempting to connect to {url} with headers {self.__headers} and timeout {self.__timeout}')
+                response = requests.get(url, timeout=self.__timeout, verify=False, headers=self.__headers, auth=self.__auth)
+                LOGGER.info(f'Received response: {response.status_code}, content: {response.text}')
+                response.raise_for_status()
+                self.__initial_data = response.json()
+                self.__started.set()
+                LOGGER.info('Connection successful')
+                return True
+            except requests.exceptions.RequestException as e:
+                LOGGER.error(f'Connection failed: {e}')
+                return False
+
+    def Disconnect(self) -> bool:
+        LOGGER.info("Disconnecting QKDDriver")
+        with self.__lock:
+            self.__terminate.set()
+        LOGGER.info("QKDDriver disconnected successfully")
+        return True
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetInitialConfig(self) -> List[Tuple[str, Any]]:
+        LOGGER.info("Getting initial configuration")
+        with self.__lock:
+            if isinstance(self.__initial_data, dict):
+                initial_config = [('qkd_node', self.__initial_data.get('qkd_node', {}))]
+                LOGGER.info(f"Initial configuration: {initial_config}")
+                return initial_config
+            LOGGER.warning("Initial data is not a dictionary")
+            return []
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+        chk_type('resources', resource_keys, list)
+        LOGGER.info(f"Getting configuration for resource_keys: {resource_keys}")
+        results = []
+        with self.__lock:
+            if not resource_keys:
+                resource_keys = ALL_RESOURCE_KEYS
+            for i, resource_key in enumerate(resource_keys):
+                chk_string(f'resource_key[{i}]', resource_key, allow_empty=False)
+                LOGGER.info(f"Retrieving resource key: {resource_key}")
+                resource_results = config_getter(
+                    self.__qkd_root, resource_key, timeout=self.__timeout, headers=self.__headers, auth=self.__auth)
+                results.extend(resource_results)
+                LOGGER.info(f"Resource results for {resource_key}: {resource_results}")
+        LOGGER.info(f"Final configuration results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        results = []
+        if len(resources) == 0:
+            return results
+
+        with self.__lock:
+            for resource_key, resource_value in resources:
+                LOGGER.info('Processing resource_key = {:s}'.format(str(resource_key)))
+
+                # Only process '/link' keys
+                if resource_key.startswith('/link'):
+                    try:
+                        # Ensure resource_value is deserialized
+                        if isinstance(resource_value, str):
+                            resource_value = json.loads(resource_value)
+
+                        # Extract values from resource_value dictionary
+                        link_uuid = resource_value['uuid']
+                        node_id_src = resource_value['src_qkdn_id']
+                        interface_id_src = resource_value['src_interface_id']
+                        node_id_dst = resource_value['dst_qkdn_id']
+                        interface_id_dst = resource_value['dst_interface_id']
+                        virt_prev_hop = resource_value.get('virt_prev_hop')
+                        virt_next_hops = resource_value.get('virt_next_hops')
+                        virt_bandwidth = resource_value.get('virt_bandwidth')
+
+                        # Call create_connectivity_link with the extracted values,
+                        # forwarding the driver's headers (e.g., JWT) alongside auth
+                        LOGGER.info(f"Creating connectivity link with UUID: {link_uuid}")
+                        result = create_connectivity_link(
+                            self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst,
+                            virt_prev_hop, virt_next_hops, virt_bandwidth,
+                            timeout=self.__timeout, auth=self.__auth, headers=self.__headers
+                        )
+
+                        # create_connectivity_link returns True on success, or False/an
+                        # Exception on failure; report it as-is instead of assuming success
+                        results.append(result)
+                        if result is True:
+                            LOGGER.info(f"Connectivity link {link_uuid} created successfully")
+
+                    except Exception as e:
+                        # Catch and log any unhandled exceptions
+                        LOGGER.exception(f'Unhandled error processing resource_key({resource_key})')
+                        results.append(e)
+                else:
+                    # Skip unsupported resource keys and append success
+                    results.append(True)
+
+        # Logging test results
+        LOGGER.info('Test keys: ' + str([x for x, y in resources]))
+        LOGGER.info('Test values: ' + str(results))
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        LOGGER.info(f"Deleting configuration for resources: {resources}")
+        results = []
+        if not resources:
+            LOGGER.warning("No resources provided for DeleteConfig")
+            return results
+        with self.__lock:
+            for resource in resources:
+                LOGGER.info(f'Resource to delete: {resource}')
+                uuid = resource[1].get('uuid')
+                if uuid:
+                    # NOTE: only the presence of a UUID is validated here; no delete
+                    # request is sent to the node
+                    LOGGER.info(f'Resource with UUID {uuid} deleted successfully')
+                    results.append(True)
+                else:
+                    LOGGER.warning(f"UUID not found in resource: {resource}")
+                    results.append(False)
+        LOGGER.info(f"DeleteConfig results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        LOGGER.info(f"Subscribing to state updates: {subscriptions}")
+        results = [True for _ in subscriptions]
+        LOGGER.info(f"Subscription results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        LOGGER.info(f"Unsubscribing from state updates: {subscriptions}")
+        results = [True for _ in subscriptions]
+        LOGGER.info(f"Unsubscription results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetState(self, blocking=False, terminate: Optional[threading.Event] = None) -> Union[dict, list]:
+        LOGGER.info(f"GetState called with blocking={blocking}, terminate={terminate}")
+        url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node'
+        try:
+            LOGGER.info(f"Making GET request to {url} to retrieve state")
+            response = requests.get(url, timeout=self.__timeout, verify=False, headers=self.__headers, auth=self.__auth)
+            LOGGER.info(f"Received state response: {response.status_code}, content: {response.text}")
+            response.raise_for_status()
+            state_data = response.json()
+            LOGGER.info(f"State data retrieved: {state_data}")
+            return state_data
+        except requests.exceptions.Timeout:
+            LOGGER.error(f'Timeout getting state from {self.__qkd_root}')
+            return []
+        except Exception as e:
+            LOGGER.error(f'Exception getting state from {self.__qkd_root}: {e}')
+            return []
diff --git a/src/device/service/drivers/qkd/Tools.py b/src/device/service/drivers/qkd/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..c17d01915dcdda55b36317c683fd60524c97239b
--- /dev/null
+++ b/src/device/service/drivers/qkd/Tools.py
@@ -0,0 +1,173 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, requests
+from requests.auth import HTTPBasicAuth
+from typing import Dict, Optional, Set
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
+from . import RESOURCE_APPS, RESOURCE_LINKS, RESOURCE_CAPABILITES, RESOURCE_NODE
+
+
+LOGGER = logging.getLogger(__name__)
+
+HTTP_OK_CODES = {
+    200,  # OK
+    201,  # Created
+    202,  # Accepted
+    204,  # No Content
+}
+
+def find_key(resource, key):
+    return json.loads(resource[1])[key]
+
+
+def config_getter(
+    root_url : str, resource_key : str, auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None,
+    node_ids : Set[str] = set(), headers={}
+):
+    url = root_url + '/restconf/data/etsi-qkd-sdn-node:qkd_node/'
+    result = []
+
+    try:
+        if resource_key in [RESOURCE_ENDPOINTS, RESOURCE_INTERFACES]:
+            url += 'qkd_interfaces/'
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            interfaces = r.json()['qkd_interfaces']['qkd_interface']
+
+            # If it's a physical endpoint
+            if resource_key == RESOURCE_ENDPOINTS:
+                for interface in interfaces:
+                    resource_value = interface.get('qkdi_att_point', {})
+                    if 'device' in resource_value and 'port' in resource_value:
+                        uuid = '{}:{}'.format(resource_value['device'], resource_value['port'])
+                        resource_key = '/endpoints/endpoint[{:s}]'.format(uuid)
+                        resource_value['uuid'] = uuid
+
+                        sample_types = {}
+                        metric_id = 301
+                        metric_name = 'KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS'.lower().replace('kpisampletype_', '')
+                        monitoring_resource_key = '{:s}/state/{:s}'.format(resource_key, metric_name)
+                        sample_types[metric_id] = monitoring_resource_key
+                        resource_value['sample_types'] = sample_types
+
+                        result.append((resource_key, resource_value))
+            else:
+                for interface in interfaces:
+                    resource_key = '/interface[{:s}]'.format(interface['qkdi_id'])
+                    endpoint_value = interface.get('qkdi_att_point', {})
+
+                    if 'device' in endpoint_value and 'port' in endpoint_value:
+                        name = '{}:{}'.format(endpoint_value['device'], endpoint_value['port'])
+                        interface['name'] = name
+                        interface['enabled'] = True  # For test purpose only
+
+                    result.append((resource_key, interface))
+
+        elif resource_key in [RESOURCE_LINKS, RESOURCE_NETWORK_INSTANCES]:
+            url += 'qkd_links/'
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            links = r.json()['qkd_links']['qkd_link']
+
+            if resource_key == RESOURCE_LINKS:
+                for link in links:
+                    link_type = link.get('qkdl_type', 'Direct')
+
+                    if link_type == 'Direct':
+                        resource_key = '/link[{:s}]'.format(link['qkdl_id'])
+                        result.append((resource_key, link))
+            else:
+                for link in links:
+                    link_type = link.get('qkdl_type', 'Direct')
+
+                    if link_type == 'Virtual':
+                        resource_key = '/service[{:s}]'.format(link['qkdl_id'])
+                        result.append((resource_key, link))
+
+        elif resource_key == RESOURCE_APPS:
+            url += 'qkd_applications/'
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            apps = r.json()['qkd_applications']['qkd_app']
+
+            for app in apps:
+                resource_key = '/app[{:s}]'.format(app['app_id'])
+                result.append((resource_key, app))
+
+        elif resource_key == RESOURCE_CAPABILITES:
+            url += 'qkdn_capabilities/'
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            capabilities = r.json()['qkdn_capabilities']
+            result.append((resource_key, capabilities))
+
+        elif resource_key == RESOURCE_NODE:
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            node = r.json()['qkd_node']
+            result.append((resource_key, node))
+
+    except requests.exceptions.Timeout:
+        LOGGER.exception('Timeout connecting {:s}'.format(url))
+    except Exception as e:  # pylint: disable=broad-except
+        LOGGER.exception('Exception retrieving/parsing endpoints for {:s}'.format(resource_key))
+        result.append((resource_key, e))
+
+    return result
+
+
+def create_connectivity_link(
+    root_url, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst,
+    virt_prev_hop = None, virt_next_hops = None, virt_bandwidth = None,
+    auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None, headers={}
+):
+    url = root_url + '/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_links/'
+    is_virtual = bool(virt_prev_hop or virt_next_hops)
+
+    qkd_link = {
+        'qkdl_id': link_uuid,
+        'qkdl_type': 'etsi-qkd-node-types:' + ('VIRT' if is_virtual else 'PHYS'),
+        'qkdl_local': {
+            'qkdn_id': node_id_src,
+            'qkdi_id': interface_id_src
+        },
+        'qkdl_remote': {
+            'qkdn_id': node_id_dst,
+            'qkdi_id': interface_id_dst
+        }
+    }
+
+    if is_virtual:
+        qkd_link['virt_prev_hop'] = virt_prev_hop
+        qkd_link['virt_next_hop'] = virt_next_hops or []
+        qkd_link['virt_bandwidth'] = virt_bandwidth
+
+    data = {'qkd_links': {'qkd_link': [qkd_link]}}
+
+    # Send the request with the caller's auth/timeout/headers and raise on HTTP
+    # errors so SetConfig can report the failure to its caller
+    r = requests.post(url, json=data, timeout=timeout, verify=False, auth=auth, headers=headers)
+    if r.status_code not in HTTP_OK_CODES:
+        r.raise_for_status()
diff --git a/src/device/service/drivers/qkd/Tools2.py b/src/device/service/drivers/qkd/Tools2.py
new file mode 100644
index 0000000000000000000000000000000000000000..c598c7443d276ea0eb76ce761d173de9944c3cfb
--- /dev/null
+++ b/src/device/service/drivers/qkd/Tools2.py
@@ -0,0 +1,272 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+import requests
+from typing import Dict, Optional, Set, List, Tuple, Union, Any
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
+from . import RESOURCE_APPS, RESOURCE_LINKS, RESOURCE_CAPABILITES, RESOURCE_NODE
+
+LOGGER = logging.getLogger(__name__)
+
+HTTP_OK_CODES = {200, 201, 202, 204}
+
+def find_key(resource: Tuple[str, str], key: str) -> Any:
+    """
+    Extracts a specific key from a JSON resource.
+    """
+    return json.loads(resource[1]).get(key)
+
+
+def config_getter(
+    root_url: str, resource_key: str, auth: Optional[Any] = None, timeout: Optional[int] = None,
+    node_ids: Set[str] = set(), headers: Dict[str, str] = {}
+) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]:
+    """
+    Fetches configuration data from a QKD node for a specified resource key.
+    Returns a list of tuples containing the resource key and the corresponding data or exception.
+    The function is agnostic to authentication: headers and auth are passed from external sources.
+    """
+    url = f"{root_url}/restconf/data/etsi-qkd-sdn-node:qkd_node/"
+    LOGGER.info(f"Fetching configuration for {resource_key} from {root_url}")
+
+    try:
+        if resource_key in [RESOURCE_ENDPOINTS, RESOURCE_INTERFACES]:
+            return fetch_interfaces(url, resource_key, headers, auth, timeout)
+
+        elif resource_key in [RESOURCE_LINKS, RESOURCE_NETWORK_INSTANCES]:
+            return fetch_links(url, resource_key, headers, auth, timeout)
+
+        elif resource_key in [RESOURCE_APPS]:
+            return fetch_apps(url, resource_key, headers, auth, timeout)
+
+        elif resource_key in [RESOURCE_CAPABILITES]:
+            return fetch_capabilities(url, resource_key, headers, auth, timeout)
+
+        elif resource_key in [RESOURCE_NODE]:
+            return fetch_node(url, resource_key, headers, auth, timeout)
+
+        else:
+            LOGGER.warning(f"Unknown resource key: {resource_key}")
+            return [(resource_key, ValueError(f"Unknown resource key: {resource_key}"))]
+
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f'Error retrieving/parsing {resource_key} from {url}: {e}')
+        return [(resource_key, e)]
+
+
+def fetch_interfaces(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]:
+    """
+    Fetches interface data from the QKD node. Adapts to both mocked and real QKD data structures.
+    """
+    result = []
+    url += 'qkd_interfaces/'
+
+    try:
+        r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+        r.raise_for_status()
+
+        # Handle both real and mocked QKD response structures
+        response_data = r.json()
+
+        if isinstance(response_data.get('qkd_interfaces'), dict):
+            interfaces = response_data.get('qkd_interfaces', {}).get('qkd_interface', [])
+        else:
+            interfaces = response_data.get('qkd_interface', [])
+
+        for interface in interfaces:
+            if resource_key in [RESOURCE_ENDPOINTS]:
+                # Handle real QKD data format
+                resource_value = interface.get('qkdi_att_point', {})
+                if 'device' in resource_value and 'port' in resource_value:
+                    uuid = f"{resource_value['device']}:{resource_value['port']}"
+                    resource_key_with_uuid = f"/endpoints/endpoint[{uuid}]"
+                    resource_value['uuid'] = uuid
+
+                    # Add sample types (for demonstration purposes), keyed on the
+                    # per-endpoint resource key
+                    sample_types = {}
+                    metric_id = 301
+                    metric_name = 'KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS'.lower().replace('kpisampletype_', '')
+                    monitoring_resource_key = '{:s}/state/{:s}'.format(resource_key_with_uuid, metric_name)
+                    sample_types[metric_id] = monitoring_resource_key
+                    resource_value['sample_types'] = sample_types
+
+                    result.append((resource_key_with_uuid, resource_value))
+
+            else:
+                # Handle both real and mocked QKD formats
+                endpoint_value = interface.get('qkdi_att_point', {})
+                if 'device' in endpoint_value and 'port' in endpoint_value:
+                    # Real QKD data format
+                    interface_uuid = f"{endpoint_value['device']}:{endpoint_value['port']}"
+                    interface['uuid'] = interface_uuid
+                    interface['name'] = interface_uuid
+                    interface['enabled'] = True  # Assume enabled for real data
+                else:
+                    # Mocked QKD data format
+                    interface_uuid = interface.get('uuid', f"/interface[{interface['qkdi_id']}]")
+                    interface['uuid'] = interface_uuid
+                    interface['name'] = interface.get('name', interface_uuid)
+                    interface['enabled'] = interface.get('enabled', False)  # Mocked enabled status
+
+                result.append((f"/interface[{interface['qkdi_id']}]", interface))
+
+    except requests.RequestException as e:
+        LOGGER.error(f"Error fetching interfaces from {url}: {e}")
+        result.append((resource_key, e))
+
+    return result
+
+def fetch_links(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]:
+    """
+    Fetches link data from the QKD node. Adapts to both mocked and real QKD data structures.
+    """
+    result = []
+
+    if resource_key in [RESOURCE_LINKS, RESOURCE_NETWORK_INSTANCES]:
+        url += 'qkd_links/'
+
+        try:
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            r.raise_for_status()
+
+            # Handle real and mocked QKD data structures: the real node nests the
+            # list under 'qkd_links' -> 'qkd_link', the mock returns a flat list
+            response_data = r.json().get('qkd_links', [])
+            if isinstance(response_data, dict):
+                links = response_data.get('qkd_link', [])
+            else:
+                links = response_data
+
+            for link in links:
+                # For real QKD format (QKD links returned as dictionary objects)
+                if isinstance(link, dict):
+                    qkdl_id = link.get('qkdl_id')
+                    link_type = link.get('qkdl_type', 'Direct')
+
+                    # Handle both real (PHYS, VIRT) and mocked (DIRECT) link types
+                    if link_type == 'PHYS' or link_type == 'VIRT':
+                        resource_key_direct = f"/link[{qkdl_id}]"
+                        result.append((resource_key_direct, link))
+                    elif link_type == 'DIRECT':
+                        # Mocked QKD format has a slightly different structure
+                        result.append((f"/link/link[{qkdl_id}]", link))
+
+                # For mocked QKD format (QKD links returned as lists)
+                elif isinstance(link, list):
+                    for l in link:
+                        qkdl_id = l.get('uuid')
+                        link_type = l.get('type', 'Direct')
+
+                        if link_type == 'DIRECT':
+                            resource_key_direct = f"/link/link[{qkdl_id}]"
+                            result.append((resource_key_direct, l))
+
+        except requests.RequestException as e:
+            LOGGER.error(f"Error fetching links from {url}: {e}")
+            result.append((resource_key, e))
+
+    return result
+
+def fetch_apps(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]:
+    """
+    Fetches application data from the QKD node.
+    """
+    result = []
+    url += 'qkd_applications/'
+
+    try:
+        r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+        r.raise_for_status()
+
+        apps = r.json().get('qkd_applications', {}).get('qkd_app', [])
+        for app in apps:
+            result.append((f"/app[{app['app_id']}]", app))
+    except requests.RequestException as e:
+        LOGGER.error(f"Error fetching applications from {url}: {e}")
+        result.append((resource_key, e))
+
+    return result
+
+
+def fetch_capabilities(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]:
+    """
+    Fetches capabilities data from the QKD node.
+    """
+    result = []
+    url += 'qkdn_capabilities/'
+
+    try:
+        r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+        r.raise_for_status()
+        result.append((resource_key, r.json()))
+    except requests.RequestException as e:
+        LOGGER.error(f"Error fetching capabilities from {url}: {e}")
+        result.append((resource_key, e))
+
+    return result
+
+
+def fetch_node(url: str, resource_key: str, headers: Dict[str, str], auth: Optional[Any], timeout: Optional[int]) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]:
+    """
+    Fetches node data from the QKD node.
+    """
+    result = []
+
+    try:
+        r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+        r.raise_for_status()
+        result.append((resource_key, r.json().get('qkd_node', {})))
+    except requests.RequestException as e:
+        LOGGER.error(f"Error fetching node from {url}: {e}")
+        result.append((resource_key, e))
+
+    return result
+
+
+def create_connectivity_link(
+    root_url: str, link_uuid: str, node_id_src: str, interface_id_src: str, node_id_dst: str, interface_id_dst: str,
+    virt_prev_hop: Optional[str] = None, virt_next_hops: Optional[List[str]] = None, virt_bandwidth: Optional[int] = None,
+    auth: Optional[Any] = None, timeout: Optional[int] = None, headers: Dict[str, str] = {}
+) -> Union[bool, Exception]:
+    """
+    Creates a connectivity link between QKD nodes using the provided parameters.
+    """
+    url = f"{root_url}/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_links/"
+
+    qkd_link = {
+        'qkdl_id': link_uuid,
+        'qkdl_type': 'etsi-qkd-node-types:' + ('VIRT' if virt_prev_hop or virt_next_hops else 'PHYS'),
+        'qkdl_local': {'qkdn_id': node_id_src, 'qkdi_id': interface_id_src},
+        'qkdl_remote': {'qkdn_id': node_id_dst, 'qkdi_id': interface_id_dst}
+    }
+
+    if virt_prev_hop or virt_next_hops:
+        qkd_link['virt_prev_hop'] = virt_prev_hop
+        qkd_link['virt_next_hop'] = virt_next_hops or []
+        qkd_link['virt_bandwidth'] = virt_bandwidth
+
+    data = {'qkd_links': {'qkd_link': [qkd_link]}}
+
+    LOGGER.info(f"Creating connectivity link with payload: {json.dumps(data)}")
+
+    try:
+        r = requests.post(url, json=data, timeout=timeout, verify=False, auth=auth, headers=headers)
+        r.raise_for_status()
+        if r.status_code in HTTP_OK_CODES:
+            LOGGER.info(f"Link {link_uuid} created successfully.")
+            return True
+        else:
+            LOGGER.error(f"Failed to create link {link_uuid}, status code: {r.status_code}")
+            return False
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Exception creating link {link_uuid} with payload {json.dumps(data)}: {e}")
+        return e
diff --git a/src/device/service/drivers/qkd/__init__.py b/src/device/service/drivers/qkd/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e24e5523a216f79dcec21f2ac21b2262426acc04
--- /dev/null
+++ b/src/device/service/drivers/qkd/__init__.py
@@ -0,0 +1,27 @@
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
+
+RESOURCE_LINKS = '__links__'
+RESOURCE_APPS = '__apps__'
+RESOURCE_CAPABILITES = '__capabilities__'
+RESOURCE_NODE = '__node__'
+
+
+ALL_RESOURCE_KEYS = [
+    RESOURCE_ENDPOINTS,
+    RESOURCE_INTERFACES,
+    RESOURCE_NETWORK_INSTANCES,
+    RESOURCE_LINKS,
+    RESOURCE_APPS,
+    RESOURCE_CAPABILITES,
+    RESOURCE_NODE
+]
+
+RESOURCE_KEY_MAPPINGS = {
+    RESOURCE_ENDPOINTS        : 'component',
+    RESOURCE_INTERFACES       : 'interface',
+    RESOURCE_NETWORK_INSTANCES: 'network_instance',
+    RESOURCE_LINKS            : 'links',
+    RESOURCE_APPS             : 'apps',
+    RESOURCE_CAPABILITES      : 'capabilities',
+    RESOURCE_NODE             : 'node'
+}
diff --git a/src/device/tests/qkd/integration/test_external_qkd_retrieve_information.py b/src/device/tests/qkd/integration/test_external_qkd_retrieve_information.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bb91191a96d0b3b6cfeef107b50a881c2261e60
--- /dev/null
+++ b/src/device/tests/qkd/integration/test_external_qkd_retrieve_information.py
@@ -0,0 +1,184 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import requests
+import json
+import os
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+from device.service.drivers.qkd.Tools2 import (
+    RESOURCE_INTERFACES,
+    RESOURCE_LINKS,
+    RESOURCE_CAPABILITES,
+    RESOURCE_NODE,
+    RESOURCE_APPS
+)
+
+# Test ID: INT_LQ_Test_01 (QKD Node Authentication)
+# Function to retrieve JWT token
+def get_jwt_token(node_address, port, username, password):
+    """ Retrieve JWT token from a node's login endpoint if it's secured. """
+    login_url = f"http://{node_address}:{port}/login"
+    payload = {'username': username, 'password': password}
+    try:
+        print(f"Attempting to retrieve JWT token from {login_url}...")
+        response = requests.post(login_url, headers={'Content-Type': 'application/x-www-form-urlencoded'}, data=payload)
+        response.raise_for_status()
+        print(f"Successfully retrieved JWT token from {login_url}")
+        return response.json().get('access_token')
+    except requests.exceptions.RequestException as e:
+        print(f"Failed to retrieve JWT token from {login_url}: {e}")
+        return None
+
+
+# Environment variables for sensitive information
+QKD1_ADDRESS = os.getenv("QKD1_ADDRESS")
+QKD2_ADDRESS = os.getenv("QKD2_ADDRESS")
+PORT = os.getenv("QKD_PORT")
+USERNAME = os.getenv("QKD_USERNAME")
+PASSWORD = os.getenv("QKD_PASSWORD")
+
+# Pytest fixture to initialize QKDDriver with token for Node 1
+@pytest.fixture
+def driver_qkd1():
+    token = get_jwt_token(QKD1_ADDRESS, PORT, USERNAME, PASSWORD)
+    headers = {'Authorization': f'Bearer {token}'} if token else {}
+    return QKDDriver(address=QKD1_ADDRESS, port=PORT, headers=headers)
+
+# Pytest fixture to initialize QKDDriver with token for Node 2
+@pytest.fixture
+def driver_qkd2():
+    token = get_jwt_token(QKD2_ADDRESS, PORT, USERNAME, PASSWORD)
+    headers = {'Authorization': f'Bearer {token}'} if token else {}
+    return QKDDriver(address=QKD2_ADDRESS, port=PORT, headers=headers)
+
+# Utility function to save data to a JSON file, filtering out non-serializable objects
+def save_json_file(filename, data):
+    serializable_data = filter_serializable(data)
+    with open(filename, 'w') as f:
+        json.dump(serializable_data, f, indent=2)
+    print(f"Saved data to {filename}")
+
+# Function to filter out non-serializable objects like HTTPError
+def filter_serializable(data):
+    if isinstance(data, list):
+        return [filter_serializable(item) for item in data if not isinstance(item, requests.exceptions.RequestException)]
+    elif isinstance(data, dict):
+        return {key: filter_serializable(value) for key, value in data.items() if not isinstance(value, requests.exceptions.RequestException)}
+    return data
+
+# Utility function to print the retrieved data for debugging, handling errors
+def print_data(label, data):
+    try:
+        print(f"{label}: {json.dumps(data, indent=2)}")
+    except TypeError as e:
+        print(f"Error printing {label}: {e}, Data: {data}")
+
+# General function to retrieve and handle HTTP errors
+def retrieve_data(driver_qkd, resource, resource_name):
+    try:
+        data = driver_qkd.GetConfig([resource])
+        assert isinstance(data, list), f"Expected a list for {resource_name}"
+        assert len(data) > 0, f"No {resource_name} found in the system"
+        return data
+    except requests.exceptions.HTTPError as e:
+        print(f"HTTPError while fetching {resource_name}: {e}")
+        return None
+    except AssertionError as e:
+        print(f"AssertionError: {e}")
+        return None
+
+# Test ID: INT_LQ_Test_02 (QKD Node Capabilities)
+def retrieve_capabilities(driver_qkd, node_name):
+    capabilities = retrieve_data(driver_qkd, RESOURCE_CAPABILITES, "capabilities")
+    if capabilities:
+        print_data(f"{node_name} Capabilities", capabilities)
+    return capabilities
+
+# Test ID: INT_LQ_Test_03 (QKD Interfaces)
+def retrieve_interfaces(driver_qkd, node_name):
+    interfaces = retrieve_data(driver_qkd, RESOURCE_INTERFACES, "interfaces")
+    if interfaces:
+        print_data(f"{node_name} Interfaces", interfaces)
+    return interfaces
+
+# Test ID: INT_LQ_Test_04 (QKD Links)
+def retrieve_links(driver_qkd, node_name):
+    links = retrieve_data(driver_qkd, RESOURCE_LINKS, "links")
+    if links:
+        print_data(f"{node_name} Links", links)
+    return links
+
+# Test ID: INT_LQ_Test_05 (QKD Link Metrics)
+def retrieve_link_metrics(driver_qkd, node_name):
+    links = retrieve_links(driver_qkd, node_name)
+    if links:
+        for link in links:
+            if 'performance_metrics' in link[1]:
+                print_data(f"{node_name} Link Metrics", link[1]['performance_metrics'])
+            else:
+                print(f"No metrics found for link {link[0]}")
+    return links
+
+# Test ID: INT_LQ_Test_06 (QKD Applications)
+def retrieve_applications(driver_qkd, node_name):
+    applications = retrieve_data(driver_qkd, RESOURCE_APPS, "applications")
+    if applications:
+        print_data(f"{node_name} Applications", applications)
+    return applications
+
+# Test ID: INT_LQ_Test_07 (System Health Check)
+def retrieve_node_data(driver_qkd, node_name):
+    node_data = retrieve_data(driver_qkd, RESOURCE_NODE, "node data")
+    if node_data:
+        print_data(f"{node_name} Node Data", node_data)
+    return node_data
+
+# Main test to retrieve and save data from QKD1 and QKD2 to files
+def test_retrieve_and_save_data(driver_qkd1, driver_qkd2):
+    # Retrieve data for QKD1
+    qkd1_interfaces = retrieve_interfaces(driver_qkd1, "QKD1")
+    qkd1_links = retrieve_links(driver_qkd1, "QKD1")
+    qkd1_capabilities = retrieve_capabilities(driver_qkd1, "QKD1")
+    qkd1_node_data = retrieve_node_data(driver_qkd1, "QKD1")
+    qkd1_apps = retrieve_applications(driver_qkd1, "QKD1")
+
+    qkd1_data = {
+        "interfaces": qkd1_interfaces,
+        "links": qkd1_links,
+        "capabilities": qkd1_capabilities,
+        "apps": qkd1_apps,
+        "node_data": qkd1_node_data
+    }
+
+    # Save QKD1 data to file
+    save_json_file('qkd1_data.json', qkd1_data)
+
+    # Retrieve data for QKD2
+    qkd2_interfaces = retrieve_interfaces(driver_qkd2, "QKD2")
+    qkd2_links = retrieve_links(driver_qkd2, "QKD2")
+    qkd2_capabilities = retrieve_capabilities(driver_qkd2, "QKD2")
+    qkd2_node_data = retrieve_node_data(driver_qkd2, "QKD2")
+    qkd2_apps = retrieve_applications(driver_qkd2, "QKD2")
+
+    qkd2_data = {
+        "interfaces": qkd2_interfaces,
+        "links": qkd2_links,
+        "capabilities": qkd2_capabilities,
+        "apps": qkd2_apps,
+        "node_data": qkd2_node_data
+    }
+
+    # Save QKD2 data to file
+    save_json_file('qkd2_data.json', qkd2_data)
diff --git a/src/device/tests/qkd/unit/PrepareScenario.py b/src/device/tests/qkd/unit/PrepareScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..756b914d55df788472ed6e839e1fc29e356877e4
--- /dev/null
+++ b/src/device/tests/qkd/unit/PrepareScenario.py
@@ -0,0 +1,126 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest, os, time, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_HTTP,
+    get_env_var_name, get_service_port_http
+)
+from context.client.ContextClient import ContextClient
+from nbi.service.rest_server.RestServer import RestServer
+from nbi.service.rest_server.nbi_plugins.tfs_api import register_tfs_api
+from device.client.DeviceClient import DeviceClient
+from device.service.DeviceService import DeviceService
+from device.service.driver_api.DriverFactory import DriverFactory
+from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+from device.service.drivers import DRIVERS
+from device.tests.CommonObjects import CONTEXT, TOPOLOGY
+from device.tests.MockService_Dependencies import MockService_Dependencies
+from monitoring.client.MonitoringClient import MonitoringClient
+from requests import codes as requests_codes
+import requests
+
+# Constants
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 8080
+
+# Get dynamic port for NBI service
+NBI_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_http(ServiceNameEnum.NBI)
+
+# Set environment variables for the NBI service host and port
+os.environ[get_env_var_name(ServiceNameEnum.NBI, ENVVAR_SUFIX_SERVICE_HOST)] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.NBI, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(NBI_SERVICE_PORT)
+
+# Expected status codes for requests
+EXPECTED_STATUS_CODES = {requests_codes['OK'], requests_codes['CREATED'], requests_codes['ACCEPTED'], requests_codes['NO_CONTENT']}
+
+# Debugging output for the port number
+print(f"MOCKSERVICE_PORT: {MOCKSERVICE_PORT}")
+print(f"NBI_SERVICE_PORT: {NBI_SERVICE_PORT}")
+
+@pytest.fixture(scope='session')
+def mock_service():
+    _service = MockService_Dependencies(MOCKSERVICE_PORT)
+    _service.configure_env_vars()
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def nbi_service_rest(mock_service):  # Pass the `mock_service` as an argument if needed
+    _rest_server = RestServer()
+    register_tfs_api(_rest_server)  # Register the TFS API with the REST server
+    _rest_server.start()
+    time.sleep(1)  # Give time for the server to start
+    yield _rest_server
+    _rest_server.shutdown()
+    _rest_server.join()
+
+@pytest.fixture(scope='session')
+def context_client(mock_service):
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def monitoring_client(mock_service):
+    # Required by the device_service fixture below; without it, pytest cannot
+    # resolve the monitoring_client argument
+    _client = MonitoringClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def device_service(context_client, monitoring_client):
+    _driver_factory = DriverFactory(DRIVERS)
+    _driver_instance_cache = DriverInstanceCache(_driver_factory)
+    _service = DeviceService(_driver_instance_cache)
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def device_client(device_service):
+    _client = DeviceClient()
+    yield _client
+    _client.close()
+
+# General request function
+def do_rest_request(method, url, body=None, timeout=10, allow_redirects=True, logger=None):
+    # Construct the request URL with NBI service port
+    request_url = f"http://{LOCAL_HOST}:{NBI_SERVICE_PORT}{url}"
+
+    # Log the request details for debugging
+    if logger:
+        msg = f"Request: {method.upper()} {request_url}"
+        if body:
+            msg += f" body={body}"
+        logger.warning(msg)
+
+    # Send the request
+    reply = requests.request(method, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects)
+
+    # Log the response details for debugging
+    if logger:
+        logger.warning(f"Reply: {reply.text}")
+
+    # Print status code and response for debugging instead of asserting
+    print(f"Status code: {reply.status_code}")
+    print(f"Response: {reply.text}")
+
+    # Return the JSON response if present
+    if reply.content:
+        return reply.json()
+    return None
+
+# Function for GET requests
+def do_rest_get_request(url, body=None, timeout=10, allow_redirects=True, logger=None):
+    return do_rest_request('get', url, body, timeout, allow_redirects, logger=logger)
+
+# Function for POST requests
+def do_rest_post_request(url, body=None, timeout=10, allow_redirects=True, logger=None):
+    return do_rest_request('post', url, body, timeout, allow_redirects, logger=logger)
diff --git a/src/device/tests/qkd/unit/retrieve_device_mock_information.py b/src/device/tests/qkd/unit/retrieve_device_mock_information.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6e18f51d4ce788cbc40dc6befe101df65ef765f
--- /dev/null
+++ b/src/device/tests/qkd/unit/retrieve_device_mock_information.py
@@ -0,0 +1,104 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
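+
+# Illustrative sketch (reply shape assumed, not asserted here): each test below
+# drives the NBI REST API through the helpers imported from PrepareScenario.py,
+# along the lines of:
+#
+#     reply = do_rest_get_request('/tfs-api/contexts', logger=LOGGER)
+#
+# where the '/tfs-api/...' routes are provided by the TFS API plugin registered
+# by the nbi_service_rest fixture.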
+
+import logging, urllib
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader
+from context.client.ContextClient import ContextClient
+from nbi.service.rest_server.RestServer import RestServer
+from common.tools.object_factory.Context import json_context_id
+from device.tests.qkd.unit.PrepareScenario import mock_service, nbi_service_rest, do_rest_get_request
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID)
+
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+
+def test_rest_get_context_ids(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    reply = do_rest_get_request('/tfs-api/context_ids')
+    print("Context IDs:", reply)
+
+def test_rest_get_contexts(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    reply = do_rest_get_request('/tfs-api/contexts')
+    print("Contexts:", reply)
+
+def test_rest_get_context(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}')
+    print("Context data:", reply)
+
+
+# ----- Topology -------------------------------------------------------------------------------------------------------
+
+def test_rest_get_topology_ids(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/topology_ids')
+    print("Topology IDs:", reply)
+
+def test_rest_get_topologies(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/topologies')
+    print("Topologies:", reply)
+
+def test_rest_get_topology(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_NAME)
+    reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/topology/{topology_uuid}')
+    print("Topology data:", reply)
+
+
+# ----- Device ---------------------------------------------------------------------------------------------------------
+
+def test_rest_get_device_ids(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    reply = do_rest_get_request('/tfs-api/device_ids')
+    print("Device IDs:", reply)
+
+def test_rest_get_devices(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    reply = do_rest_get_request('/tfs-api/devices')
+    print("Devices:", reply)
+
+
+# ----- Link -----------------------------------------------------------------------------------------------------------
+
+def test_rest_get_link_ids(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    reply = do_rest_get_request('/tfs-api/link_ids')
+    print("Link IDs:", reply)
+
+def test_rest_get_links(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    reply = do_rest_get_request('/tfs-api/links')
+    print("Links:", reply)
+
+
+# ----- Service --------------------------------------------------------------------------------------------------------
+
+def test_rest_get_service_ids(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    reply = do_rest_get_request('/tfs-api/service_ids')
+    print("Service IDs:", reply)
+
+def test_rest_get_services(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/services')
+    print("Services:", reply)
+
+# ----- Apps -----------------------------------------------------------------------------------------------------------
+
+def test_rest_get_apps(nbi_service_rest: RestServer):  # pylint: disable=redefined-outer-name, unused-argument
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)  # Context ID
+    reply = do_rest_get_request(f'/tfs-api/context/{context_uuid}/apps')
+    print("Apps:", reply)
diff --git a/src/device/tests/qkd/unit/test_application_deployment.py b/src/device/tests/qkd/unit/test_application_deployment.py
new file mode 100644
index 0000000000000000000000000000000000000000..92e16663b41556563aab884be2ee48518cd15ff7
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_application_deployment.py
@@ -0,0 +1,47 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import json
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+MOCK_QKD_ADDRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+@pytest.fixture
+def qkd_driver():
+    # Initialize the QKD driver with the appropriate settings
+    return QKDDriver(address=MOCK_QKD_ADDRESS, port=MOCK_PORT, username='user', password='pass')
+
+def test_application_deployment(qkd_driver):
+    qkd_driver.Connect()
+
+    # Application registration data
+    app_data = {
+        'qkd_app': [
+            {
+                'app_id': '00000001-0001-0000-0000-000000000001',
+                'client_app_id': [],
+                'app_statistics': {'statistics': []},
+                'app_qos': {},
+                'backing_qkdl_id': []
+            }
+        ]
+    }
+
+    # Send a POST request to create the application
+    response = qkd_driver.SetConfig([('/qkd_applications/qkd_app', json.dumps(app_data))])
+
+    # Verify response
+    assert response[0] is True, "Expected application registration to succeed"
diff --git a/src/device/tests/qkd/unit/test_mock_qkd_node.py b/src/device/tests/qkd/unit/test_mock_qkd_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..f679a8389476c35a6881b7dc6484baab8ef4e20b
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_mock_qkd_node.py
@@ -0,0 +1,31 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import requests
+from requests.exceptions import ConnectionError
+
+def test_mock_qkd_node_responses():
+    response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
+    assert response.status_code == 200
+    data = response.json()
+    assert 'qkd_node' in data
+
+def test_mock_node_failure_scenarios():
+    # No mock node listens on port 12345, so the request must fail to connect
+    with pytest.raises(ConnectionError):
+        requests.get('http://127.0.0.1:12345/restconf/data/etsi-qkd-sdn-node:qkd_node')
diff --git a/src/device/tests/qkd/unit/test_qkd_compliance.py b/src/device/tests/qkd/unit/test_qkd_compliance.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f305888e952e7d9acc3e96ffc1e427a7cc85685
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_compliance.py
@@ -0,0 +1,24 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import requests
+from tests.tools.mock_qkd_nodes.YangValidator import YangValidator
+
+def test_compliance_with_yang_models():
+    validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
+    response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
+    data = response.json()
+    assert validator.parse_to_dict(data) is not None
diff --git a/src/device/tests/qkd/unit/test_qkd_configuration.py b/src/device/tests/qkd/unit/test_qkd_configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..15c4787c28a92ed07d8666b4c715954da1e690d6
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_configuration.py
@@ -0,0 +1,230 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
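+
+# Sketch of the convention exercised by these tests (resource keys defined in
+# device/service/drivers/qkd/__init__.py): GetConfig() maps each key onto a
+# RESTCONF subtree of the node and returns (resource_key, value) tuples, for
+# example (illustrative values only):
+#
+#     qkd_driver.GetConfig([RESOURCE_LINKS])
+#     # -> [('/link[<qkdl_id>]', {'qkdl_id': ..., 'qkdl_type': ..., ...}), ...]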
+
+import pytest
+import json
+import re  # needed by the VIRT-link UUID check in test_retrieve_links below
+from requests.exceptions import HTTPError
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+import requests
+from device.service.drivers.qkd.Tools2 import (
+    RESOURCE_INTERFACES,
+    RESOURCE_LINKS,
+    RESOURCE_ENDPOINTS,
+    RESOURCE_APPS,
+    RESOURCE_CAPABILITES,
+    RESOURCE_NODE
+)
+
+MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+@pytest.fixture
+def qkd_driver():
+    # Initialize the QKD driver with the appropriate settings, ensure correct JWT headers are included
+    token = "YOUR_JWT_TOKEN"  # Replace with your actual JWT token
+    if not token:
+        pytest.fail("JWT token is missing. Make sure to generate a valid JWT token.")
+    headers = {"Authorization": f"Bearer {token}"}
+    return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, headers=headers)
+
+# Utility function to print the retrieved data for debugging
+def print_data(label, data):
+    print(f"{label}: {json.dumps(data, indent=2)}")
+
+# Test ID: SBI_Test_03 (Initial Config Retrieval)
+def test_initial_config_retrieval(qkd_driver):
+    qkd_driver.Connect()
+
+    # Retrieve and validate the initial configuration
+    config = qkd_driver.GetInitialConfig()
+
+    # Since GetInitialConfig returns a list, adjust the assertions accordingly
+    assert isinstance(config, list), "Expected a list for initial config"
+    assert len(config) > 0, "Initial config should not be empty"
+
+    # Output for debugging
+    print_data("Initial Config", config)
+
+# Test ID: INT_LQ_Test_05 (QKD Devices Retrieval)
+def test_retrieve_devices(qkd_driver):
+    qkd_driver.Connect()
+
+    # Retrieve and validate device information
+    devices = qkd_driver.GetConfig([RESOURCE_NODE])
+    assert isinstance(devices, list), "Expected a list of devices"
+
+    if not devices:
+        pytest.skip("No devices found in the system. Skipping device test.")
+
+    for device in devices:
+        assert isinstance(device, tuple), "Each device entry must be a tuple"
+        assert isinstance(device[1], dict), "Device data must be a dictionary"
+        if isinstance(device[1], Exception):
+            pytest.fail(f"Error retrieving devices: {device[1]}")
+
+    # Output for debugging
+    print_data("Devices", devices)
+
+# Test ID: INT_LQ_Test_04 (QKD Links Retrieval)
+def test_retrieve_links(qkd_driver):
+    qkd_driver.Connect()
+
+    try:
+        # Fetch the links using the correct resource key
+        links = qkd_driver.GetConfig([RESOURCE_LINKS])
+        assert isinstance(links, list), "Expected a list of tuples (resource key, data)."
+ + if len(links) == 0: + pytest.skip("No links found in the system, skipping link validation.") + + for link in links: + assert isinstance(link, tuple), "Each link entry must be a tuple" + resource_key, link_data = link # Unpack the tuple + + # Handle HTTPError or exception in the response + if isinstance(link_data, requests.exceptions.HTTPError): + pytest.fail(f"Failed to retrieve links due to HTTP error: {link_data}") + + if isinstance(link_data, dict): + # For real QKD data (links as dictionaries) + assert 'qkdl_id' in link_data, "Missing 'qkdl_id' in link data" + assert 'qkdl_local' in link_data, "Missing 'qkdl_local' in link data" + assert 'qkdl_remote' in link_data, "Missing 'qkdl_remote' in link data" + assert 'qkdl_type' in link_data, "Missing 'qkdl_type' in link data" + + # Check 'virt_prev_hop' only for virtual links (VIRT) + if link_data['qkdl_type'] == 'etsi-qkd-node-types:VIRT': + virt_prev_hop = link_data.get('virt_prev_hop') + assert virt_prev_hop is None or re.match(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', str(virt_prev_hop)), \ + f"Invalid 'virt_prev_hop': {virt_prev_hop}" + + # Print out the link details for debugging + print(f"Link ID: {link_data['qkdl_id']}") + print(f"Link Type: {link_data['qkdl_type']}") + print(f"Local QKD: {json.dumps(link_data['qkdl_local'], indent=2)}") + print(f"Remote QKD: {json.dumps(link_data['qkdl_remote'], indent=2)}") + + elif isinstance(link_data, list): + # For mocked QKD data (links as lists of dictionaries) + for mock_link in link_data: + assert 'uuid' in mock_link, "Missing 'uuid' in mocked link data" + assert 'src_qkdn_id' in mock_link, "Missing 'src_qkdn_id' in mocked link data" + assert 'dst_qkdn_id' in mock_link, "Missing 'dst_qkdn_id' in mocked link data" + + # Print out the mocked link details for debugging + print(f"Mock Link ID: {mock_link['uuid']}") + print(f"Source QKD ID: {mock_link['src_qkdn_id']}") + print(f"Destination QKD ID: {mock_link['dst_qkdn_id']}") + + else: + pytest.fail(f"Unexpected link data format: {type(link_data)}") + + except HTTPError as e: + pytest.fail(f"HTTP error occurred while retrieving links: {e}") + except Exception as e: + pytest.fail(f"An unexpected error occurred: {e}") + +# Test for QKD Services +def test_retrieve_services(qkd_driver): + qkd_driver.Connect() + services = qkd_driver.GetConfig([RESOURCE_ENDPOINTS]) + assert isinstance(services, list), "Expected a list of services" + + if not services: + pytest.skip("No services found in the system. Skipping service test.") + + for service in services: + assert isinstance(service, tuple), "Each service entry must be a tuple" + assert isinstance(service[1], dict), "Service data must be a dictionary" + if isinstance(service[1], Exception): + pytest.fail(f"Error retrieving services: {service[1]}") + + print("Services:", json.dumps(services, indent=2)) + +# Test ID: INT_LQ_Test_07 (QKD Applications Retrieval) +def test_retrieve_applications(qkd_driver): + qkd_driver.Connect() + + # Retrieve and validate applications information + applications = qkd_driver.GetConfig([RESOURCE_APPS]) # Adjust to fetch applications using the correct key + assert isinstance(applications, list), "Expected a list of applications" + + if not applications: + pytest.skip("No applications found in the system. 
Skipping applications test.")
+
+    for app in applications:
+        assert isinstance(app, tuple), "Each application entry must be a tuple"
+        assert isinstance(app[1], dict), "Application data must be a dictionary"
+        if isinstance(app[1], Exception):
+            pytest.fail(f"Error retrieving applications: {app[1]}")
+
+    # Output for debugging
+    print_data("Applications", applications)
+
+# Test ID: INT_LQ_Test_03 (QKD Interfaces Retrieval)
+def test_retrieve_interfaces(qkd_driver):
+    qkd_driver.Connect()
+
+    # Retrieve and validate interface information
+    interfaces = qkd_driver.GetConfig([RESOURCE_INTERFACES])
+
+    assert isinstance(interfaces, list), "Expected a list of interfaces"
+    assert len(interfaces) > 0, "No interfaces found in the system"
+
+    for interface in interfaces:
+        assert isinstance(interface, tuple), "Each interface entry must be a tuple"
+        assert isinstance(interface[1], dict), "Interface data must be a dictionary"
+        if isinstance(interface[1], Exception):
+            pytest.fail(f"Error retrieving interfaces: {interface[1]}")
+
+    # Output for debugging
+    print_data("Interfaces", interfaces)
+
+# Test ID: INT_LQ_Test_02 (QKD Capabilities Retrieval)
+def test_retrieve_capabilities(qkd_driver):
+    qkd_driver.Connect()
+
+    # Retrieve and validate capabilities information
+    capabilities = qkd_driver.GetConfig([RESOURCE_CAPABILITES])
+
+    assert isinstance(capabilities, list), "Expected a list of capabilities"
+    assert len(capabilities) > 0, "No capabilities found in the system"
+
+    for capability in capabilities:
+        assert isinstance(capability, tuple), "Each capability entry must be a tuple"
+        assert isinstance(capability[1], dict), "Capability data must be a dictionary"
+        if isinstance(capability[1], Exception):
+            pytest.fail(f"Error retrieving capabilities: {capability[1]}")
+
+    # Output for debugging
+    print_data("Capabilities", capabilities)
+
+# Test ID: INT_LQ_Test_03 (QKD Endpoints Retrieval)
+def test_retrieve_endpoints(qkd_driver):
+    qkd_driver.Connect()
+
+    # Retrieve and validate endpoint information
+    endpoints = qkd_driver.GetConfig([RESOURCE_ENDPOINTS])
+
+    assert isinstance(endpoints, list), "Expected a list of endpoints"
+    assert len(endpoints) > 0, "No endpoints found in the system"
+
+    for endpoint in endpoints:
+        assert isinstance(endpoint, tuple), "Each endpoint entry must be a tuple"
+        assert isinstance(endpoint[1], dict), "Endpoint data must be a dictionary"
+        if isinstance(endpoint[1], Exception):
+            pytest.fail(f"Error retrieving endpoints: {endpoint[1]}")
+
+    # Output for debugging
+    print_data("Endpoints", endpoints)
diff --git a/src/device/tests/qkd/unit/test_qkd_error_handling.py b/src/device/tests/qkd/unit/test_qkd_error_handling.py
new file mode 100644
index 0000000000000000000000000000000000000000..d93e3711136de496fd39365563032f827cfbe913
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_error_handling.py
@@ -0,0 +1,93 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
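+
+# NOTE: QKDDriver reports per-resource failures in-band: GetConfig() returns
+# (resource_key, value) tuples where value may be an Exception instead of a dict,
+# so callers are expected to check for it explicitly. Illustrative sketch
+# (handle_error is a hypothetical callback, not part of the driver API):
+#
+#   for key, value in driver.GetConfig([RESOURCE_LINKS]):
+#       if isinstance(value, Exception):
+#           handle_error(key, value)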
+
+import pytest, requests
+from requests.exceptions import HTTPError
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+@pytest.fixture
+def qkd_driver():
+    # Initialize the QKD driver for testing
+    return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass')
+
+def test_invalid_operations_on_network_links(qkd_driver):
+    """
+    Test Case ID: SBI_Test_09 - Perform invalid operations and validate error handling.
+    Objective: Perform invalid operations on network links and ensure proper error handling and logging.
+    """
+    qkd_driver.Connect()
+
+    # Step 1: Perform invalid operation with an incorrect resource key
+    invalid_payload = {
+        "invalid_resource_key": {
+            "invalid_field": "invalid_value"
+        }
+    }
+
+    try:
+        # Attempt to perform an invalid operation against the mock node (the port is
+        # required; without it the request would go to port 80 instead of the mock node)
+        response = requests.post(f'http://{qkd_driver.address}:{MOCK_PORT}/invalid_resource', json=invalid_payload)
+        response.raise_for_status()
+
+    except HTTPError as e:
+        # Step 2: Validate proper error handling and user-friendly messages
+        print(f"Handled HTTPError: {e}")
+        assert e.response.status_code in [400, 404], "Expected 400 Bad Request or 404 Not Found for invalid operation."
+        if e.response.status_code == 404:
+            assert "Not Found" in e.response.text, "Expected user-friendly 'Not Found' message."
+        elif e.response.status_code == 400:
+            assert "Invalid resource key" in e.response.text, "Expected user-friendly 'Bad Request' message."
+
+    except Exception as e:
+        # Log unexpected exceptions
+        pytest.fail(f"Unexpected error occurred: {e}")
+
+    finally:
+        qkd_driver.Disconnect()
+
+def test_network_failure_simulation(qkd_driver):
+    """
+    Test Case ID: SBI_Test_10 - Simulate network failures and validate resilience and recovery.
+    Objective: Simulate network failures (e.g., QKD node downtime) and validate system's resilience.
+    """
+    qkd_driver.Connect()
+
+    try:
+        # Step 1: Simulate network failure (disconnect QKD node, or use unreachable address/port)
+        qkd_driver_with_failure = QKDDriver(address='127.0.0.1', port=12345, username='user', password='pass')  # Valid but incorrect port
+
+        # Try to connect and retrieve state, expecting a failure
+        response = qkd_driver_with_failure.GetState()
+
+        # Step 2: Validate resilience and recovery mechanisms
+        # Check if the response is empty, indicating a failure to retrieve state
+        if not response:
+            print("Network failure simulated successfully and handled.")
+        else:
+            pytest.fail("Expected network failure but received a valid response.")
+
+    except HTTPError as e:
+        # Log HTTP errors as part of error handling
+        print(f"Handled network failure error: {e}")
+
+    except Exception as e:
+        # Step 3: Log unexpected exceptions
+        print(f"Network failure encountered: {e}")
+
+    finally:
+        # Step 4: Ensure driver disconnects properly
+        qkd_driver.Disconnect()
diff --git a/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..150d00fd079b0a036f383653c833562279bb4d72
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest, requests
+from unittest.mock import patch
+from device.service.drivers.qkd.QKDDriver import QKDDriver
+
+MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+@pytest.fixture
+def qkd_driver():
+    return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass')
+
+# Deliverable Test ID: SBI_Test_01
+def test_qkd_driver_connection(qkd_driver):
+    assert qkd_driver.Connect() is True
+
+# Deliverable Test ID: SBI_Test_01
+def test_qkd_driver_invalid_connection():
+    qkd_driver = QKDDriver(address='127.0.0.1', port=12345, username='user', password='pass')  # Use invalid port directly
+    assert qkd_driver.Connect() is False
+
+# Deliverable Test ID: SBI_Test_10
+# Patch requests in the same module the driver under test was imported from
+# (QKDDriver, not QKDDriver2), so the simulated timeout actually reaches it.
+@patch('device.service.drivers.qkd.QKDDriver.requests.get')
+def test_qkd_driver_timeout_connection(mock_get, qkd_driver):
+    mock_get.side_effect = requests.exceptions.Timeout
+    qkd_driver.timeout = 0.001  # Simulate very short timeout
+    assert qkd_driver.Connect() is False
diff --git a/src/device/tests/qkd/unit/test_qkd_performance.py b/src/device/tests/qkd/unit/test_qkd_performance.py
new file mode 100644
index 0000000000000000000000000000000000000000..b15d1ab070b61333c55f7674e39ba74aae891de6
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_performance.py
@@ -0,0 +1,32 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# tests/unit/test_qkd_performance.py
+
+import time
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+def test_performance_under_load():
+    driver = QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass')
+    driver.Connect()
+
+    start_time = time.time()
+    for _ in range(1000):
+        driver.GetConfig(['/qkd_interfaces/qkd_interface'])
+    end_time = time.time()
+
+    # 1000 sequential GetConfig calls against the mock node should finish within a minute
+    assert (end_time - start_time) < 60
diff --git a/src/device/tests/qkd/unit/test_qkd_security.py b/src/device/tests/qkd/unit/test_qkd_security.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2942fd4685dce13f89832528d4298b267707886
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_security.py
@@ -0,0 +1,88 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import pytest +import requests +from requests.exceptions import HTTPError +from device.service.drivers.qkd.QKDDriver2 import QKDDriver +from device.service.drivers.qkd.Tools2 import RESOURCE_CAPABILITES + +# Helper function to print data in a formatted JSON style for debugging +def print_data(label, data): + print(f"{label}: {json.dumps(data, indent=2)}") + +# Environment variables for sensitive information +QKD1_ADDRESS = os.getenv("QKD1_ADDRESS") +MOCK_QKD_ADDRRESS = '127.0.0.1' +MOCK_PORT = 11111 +PORT = os.getenv("QKD_PORT") +USERNAME = os.getenv("QKD_USERNAME") +PASSWORD = os.getenv("QKD_PASSWORD") + + +# Utility function to retrieve JWT token +def get_jwt_token(address, port, username, password): + url = f"http://{address}:{port}/login" + headers = {"Content-Type": "application/x-www-form-urlencoded"} + payload = f"username={username}&password={password}" + + try: + response = requests.post(url, data=payload, headers=headers) + response.raise_for_status() + return response.json().get('access_token') + except requests.exceptions.RequestException as e: + print(f"Failed to retrieve JWT token: {e}") + return None + +# Real QKD Driver (Requires JWT token) +@pytest.fixture +def real_qkd_driver(): + token = get_jwt_token(QKD1_ADDRESS, PORT, USERNAME, PASSWORD) # Replace with actual details + if not token: + pytest.fail("Failed to retrieve JWT token.") + headers = {'Authorization': f'Bearer {token}'} + return QKDDriver(address=QKD1_ADDRESS, port=PORT, headers=headers) + +# Mock QKD Driver (No actual connection, mock capabilities) +@pytest.fixture +def mock_qkd_driver(): + # Initialize the mock QKD driver with mock settings + token = "mock_token" + headers = {"Authorization": f"Bearer {token}"} + return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, headers=headers) + +# General function to retrieve and test capabilities +def retrieve_capabilities(qkd_driver, driver_name): + try: + qkd_driver.Connect() + capabilities = qkd_driver.GetConfig([RESOURCE_CAPABILITES]) + assert isinstance(capabilities, list), "Expected a list of capabilities" + assert len(capabilities) > 0, f"No capabilities found for {driver_name}" + print_data(f"{driver_name} Capabilities", capabilities) + except HTTPError as e: + pytest.fail(f"HTTPError while fetching capabilities for {driver_name}: {e}") + except AssertionError as e: + pytest.fail(f"AssertionError: {e}") + except Exception as e: + pytest.fail(f"An unexpected error occurred: {e}") + +# Test for Real QKD Capabilities +def test_real_qkd_capabilities(real_qkd_driver): + retrieve_capabilities(real_qkd_driver, "Real QKD") + +# Test for Mock QKD Capabilities +def test_mock_qkd_capabilities(mock_qkd_driver): + retrieve_capabilities(mock_qkd_driver, "Mock QKD") diff --git a/src/device/tests/qkd/unit/test_qkd_subscription.py b/src/device/tests/qkd/unit/test_qkd_subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..883fe2a1a6defe85a995c12e824a5e7d88159981 --- /dev/null +++ b/src/device/tests/qkd/unit/test_qkd_subscription.py @@ -0,0 +1,53 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) 
(https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from typing import List, Tuple
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+
+@pytest.fixture
+def qkd_driver():
+    # Initialize the QKD driver
+    return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass')
+
+
+def test_state_subscription(qkd_driver):
+    """
+    Test Case ID: SBI_Test_06 - Subscribe to state changes and validate the subscription process.
+    """
+    qkd_driver.Connect()
+
+    try:
+        # Step 1: Define the subscription
+        subscriptions = [
+            ('00000001-0000-0000-0000-000000000000', 60, 10)  # (node_id, frequency, timeout)
+        ]
+
+        # Step 2: Subscribe to state changes using the driver method
+        subscription_results = qkd_driver.SubscribeState(subscriptions)
+
+        # Step 3: Validate that the subscription was successful
+        assert all(result is True for result in subscription_results), "Subscription to state changes failed."
+
+        print("State subscription successful:", subscription_results)
+
+    except Exception as e:
+        pytest.fail(f"An unexpected error occurred during state subscription: {e}")
+
+    finally:
+        qkd_driver.Disconnect()
diff --git a/src/device/tests/qkd/unit/test_qkd_unsubscription.py b/src/device/tests/qkd/unit/test_qkd_unsubscription.py
new file mode 100644
index 0000000000000000000000000000000000000000..883fe2a1a6defe85a995c12e824a5e7d88159981
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_unsubscription.py
@@ -0,0 +1,53 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from typing import List, Tuple
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+
+@pytest.fixture
+def qkd_driver():
+    # Initialize the QKD driver
+    return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass')
+
+
+def test_state_unsubscription(qkd_driver):
+    """
+    Test Case ID: SBI_Test_06 - Subscribe to state changes, then unsubscribe, and validate the unsubscription process.
+ """ + qkd_driver.Connect() + + try: + # Step 1: Define the subscription + subscriptions = [ + ('00000001-0000-0000-0000-000000000000', 60, 10) # (node_id, frequency, timeout) + ] + + # Step 2: Subscribe to state changes using the driver method + subscription_results = qkd_driver.SubscribeState(subscriptions) + + # Step 3: Validate that the subscription was successful + assert all(result is True for result in subscription_results), "Subscription to state changes failed." + + print("State subscription successful:", subscription_results) + + except Exception as e: + pytest.fail(f"An unexpected error occurred during state subscription: {e}") + + finally: + qkd_driver.Disconnect() diff --git a/src/device/tests/qkd/unit/test_set_new_configuration.py b/src/device/tests/qkd/unit/test_set_new_configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..438e46d74f0ad1204f496aaf99e29e21f41c5805 --- /dev/null +++ b/src/device/tests/qkd/unit/test_set_new_configuration.py @@ -0,0 +1,126 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest, requests, uuid +from requests.exceptions import HTTPError +from device.service.drivers.qkd.QKDDriver2 import QKDDriver +from device.service.drivers.qkd.Tools2 import RESOURCE_APPS + +MOCK_QKD1_ADDRRESS = '127.0.0.1' +MOCK_PORT1 = 11111 +MOCK_QKD3_ADDRRESS = '127.0.0.1' +MOCK_PORT3 = 33333 + +@pytest.fixture +def qkd_driver1(): + # Initialize the QKD driver for QKD1 + return QKDDriver(address=MOCK_QKD1_ADDRRESS, port=MOCK_PORT1, username='user', password='pass') + +@pytest.fixture +def qkd_driver3(): + # Initialize the QKD driver for QKD3 + return QKDDriver(address=MOCK_QKD3_ADDRRESS, port=MOCK_PORT3, username='user', password='pass') + +def create_qkd_app(driver, qkdn_id, backing_qkdl_id, client_app_id=None): + """ + Helper function to create QKD applications on the given driver. + """ + server_app_id = str(uuid.uuid4()) # Generate a unique server_app_id + + app_payload = { + 'app': { + 'server_app_id': server_app_id, + 'client_app_id': client_app_id if client_app_id else [], # Add client_app_id if provided + 'app_status': 'ON', + 'local_qkdn_id': qkdn_id, + 'backing_qkdl_id': backing_qkdl_id + } + } + + try: + # Log the payload being sent + print(f"Sending payload to {driver.address}: {app_payload}") + + # Send POST request to create the application + response = requests.post(f'http://{driver.address}/app/create_qkd_app', json=app_payload) + + # Check if the request was successful (HTTP 2xx) + response.raise_for_status() + + # Validate the response + assert response.status_code == 200, f"Failed to create QKD app for {driver.address}: {response.text}" + + response_data = response.json() + assert response_data.get('status') == 'success', "Application creation failed." 
+
+        # Log the response from the server
+        print(f"Server {driver.address} response: {response_data}")
+
+        return server_app_id  # Return the created server_app_id
+
+    except HTTPError as e:
+        pytest.fail(f"HTTP error occurred while creating the QKD application on {driver.address}: {e}")
+    except Exception as e:
+        pytest.fail(f"An unexpected error occurred: {e}")
+
+def test_create_qkd_application_bidirectional(qkd_driver1, qkd_driver3):
+    """
+    Create QKD applications on both qkd1 and qkd3, and validate the complete creation in both directions.
+    """
+
+    qkd_driver1.Connect()
+    qkd_driver3.Connect()
+
+    try:
+        # Step 1: Create QKD application for qkd1, referencing qkd3 as the backing QKDL
+        server_app_id_qkd1 = create_qkd_app(
+            qkd_driver1, MOCK_PORT1,
+            qkdn_id='00000001-0000-0000-0000-000000000000',
+            backing_qkdl_id=['00000003-0002-0000-0000-000000000000']  # qkd3's QKDL
+        )
+
+        # Step 2: Create QKD application for qkd3, referencing qkd1 as the backing QKDL, and setting client_app_id to qkd1's app
+        create_qkd_app(
+            qkd_driver3, MOCK_PORT3,
+            qkdn_id='00000003-0000-0000-0000-000000000000',
+            backing_qkdl_id=['00000003-0002-0000-0000-000000000000'],  # qkd3's QKDL
+            client_app_id=[server_app_id_qkd1]  # Set qkd1 as the client
+        )
+
+        # Step 3: Fetch applications from both qkd1 and qkd3 to validate that the applications exist
+        apps_qkd1 = qkd_driver1.GetConfig([RESOURCE_APPS])
+        apps_qkd3 = qkd_driver3.GetConfig([RESOURCE_APPS])
+
+        print(f"QKD1 applications config: {apps_qkd1}")
+        print(f"QKD3 applications config: {apps_qkd3}")
+
+        # Debugging: Print the full structure of the apps to understand what is returned
+        for app in apps_qkd1:
+            print(f"QKD1 App: {app}")
+
+        for app in apps_qkd3:
+            print(f"QKD3 App: {app}")
+
+        # Step 4: Validate the applications are created using app_id instead of server_app_id
+        assert any(app[1].get('app_id') == '00000001-0001-0000-0000-000000000000' for app in apps_qkd1), "QKD app not created on qkd1."
+        assert any(app[1].get('app_id') == '00000003-0001-0000-0000-000000000000' for app in apps_qkd3), "QKD app not created on qkd3."
+
+        print("QKD applications created successfully in both directions.")
+
+    except Exception as e:
+        pytest.fail(f"An unexpected error occurred: {e}")
+    finally:
+        qkd_driver1.Disconnect()
+        qkd_driver3.Disconnect()
diff --git a/src/device/tests/test_unitary_openconfig_ocnos.py b/src/device/tests/test_unitary_openconfig_ocnos.py
new file mode 100644
index 0000000000000000000000000000000000000000..87d951581ad98147f8dd565af616fe034a346693
--- /dev/null
+++ b/src/device/tests/test_unitary_openconfig_ocnos.py
@@ -0,0 +1,210 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
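+
+# The helper functions below each build a (xpath, json_data) tuple as consumed by
+# OpenConfigDriver.SetConfig() / DeleteConfig(). For instance (derived from the
+# helpers themselves, shown here only as an illustration):
+#
+#   network_instance('ecoc24', 'L3VRF', '192.168.150.1', '65001:1')
+#   -> ('/network_instance[ecoc24]',
+#       '{"name": "ecoc24", "type": "L3VRF", "router_id": "192.168.150.1",
+#         "route_distinguisher": "65001:1"}')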
+ +import json, logging, os, pytest, time +from typing import Dict, Tuple +os.environ['DEVICE_EMULATED_ONLY'] = 'YES' + +# pylint: disable=wrong-import-position +from device.service.drivers.openconfig.OpenConfigDriver import OpenConfigDriver +#from device.service.driver_api._Driver import ( +# RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES +#) + +logging.basicConfig(level=logging.DEBUG) +#logging.getLogger('ncclient.operations.rpc').setLevel(logging.INFO) +#logging.getLogger('ncclient.transport.parser').setLevel(logging.INFO) + +LOGGER = logging.getLogger(__name__) + + +##### DRIVERS FIXTURE ################################################################################################## + +DEVICES = { + 'CSGW1': {'address': '10.1.1.86', 'port': 830, 'settings': { + 'username': 'ocnos', 'password': 'ocnos', + 'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False, + 'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120} + }}, + 'CSGW2': {'address': '10.1.1.87', 'port': 830, 'settings': { + 'username': 'ocnos', 'password': 'ocnos', + 'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False, + 'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120} + }}, +} + +@pytest.fixture(scope='session') +def drivers() -> Dict[str, OpenConfigDriver]: + _drivers : Dict[str, OpenConfigDriver] = dict() + for device_name, driver_params in DEVICES.items(): + driver = OpenConfigDriver(driver_params['address'], driver_params['port'], **(driver_params['settings'])) + driver.Connect() + _drivers[device_name] = driver + yield _drivers + time.sleep(1) + for _,driver in _drivers.items(): + driver.Disconnect() + + +def network_instance(ni_name, ni_type, ni_router_id=None, ni_route_distinguisher=None) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]'.format(ni_name) + data = {'name': ni_name, 'type': ni_type} + if ni_router_id is not None: data['router_id'] = ni_router_id + if ni_route_distinguisher is not None: data['route_distinguisher'] = ni_route_distinguisher + return path, json.dumps(data) + +def network_instance_add_protocol_bgp(ni_name, ni_type, ni_router_id, ni_bgp_as, neighbors=[]) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/protocols[BGP]'.format(ni_name) + data = { + 'name': ni_name, 'type': ni_type, 'router_id': ni_router_id, 'identifier': 'BGP', + 'protocol_name': ni_bgp_as, 'as': ni_bgp_as + } + if len(neighbors) > 0: + data['neighbors'] = [ + {'ip_address': neighbor_ip_address, 'remote_as': neighbor_remote_as} + for neighbor_ip_address, neighbor_remote_as in neighbors + ] + return path, json.dumps(data) + +def network_instance_add_protocol_direct(ni_name, ni_type) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(ni_name) + data = { + 'name': ni_name, 'type': ni_type, 'identifier': 'DIRECTLY_CONNECTED', + 'protocol_name': 'DIRECTLY_CONNECTED' + } + return path, json.dumps(data) + +def network_instance_add_protocol_static(ni_name, ni_type) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/protocols[STATIC]'.format(ni_name) + data = { + 'name': ni_name, 'type': ni_type, 'identifier': 'STATIC', + 'protocol_name': 'STATIC' + } + return path, json.dumps(data) + +#def network_instance_static_route(ni_name, prefix, next_hop, next_hop_index=0) -> Tuple[str, Dict]: +# path = 
'/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix) +# data = {'name': ni_name, 'prefix': prefix, 'next_hop': next_hop, 'next_hop_index': next_hop_index} +# return path, json.dumps(data) + +def network_instance_add_table_connection( + ni_name, src_protocol, dst_protocol, address_family, default_import_policy, bgp_as=None +) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format( + ni_name, src_protocol, dst_protocol, address_family + ) + data = { + 'name': ni_name, 'src_protocol': src_protocol, 'dst_protocol': dst_protocol, + 'address_family': address_family, 'default_import_policy': default_import_policy, + } + if bgp_as is not None: data['as'] = bgp_as + return path, json.dumps(data) + +def interface( + name, index, description=None, if_type=None, vlan_id=None, mtu=None, ipv4_address_prefix=None, enabled=None +) -> Tuple[str, Dict]: + path = '/interface[{:s}]/subinterface[{:d}]'.format(name, index) + data = {'name': name, 'index': index} + if description is not None: data['description'] = description + if if_type is not None: data['type' ] = if_type + if vlan_id is not None: data['vlan_id' ] = vlan_id + if mtu is not None: data['mtu' ] = mtu + if enabled is not None: data['enabled' ] = enabled + if ipv4_address_prefix is not None: + ipv4_address, ipv4_prefix = ipv4_address_prefix + data['address_ip' ] = ipv4_address + data['address_prefix'] = ipv4_prefix + return path, json.dumps(data) + +def network_instance_interface(ni_name, ni_type, if_name, if_index) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, if_index) + data = {'name': ni_name, 'type': ni_type, 'id': if_name, 'interface': if_name, 'subinterface': if_index} + return path, json.dumps(data) + +def test_configure(drivers : Dict[str, OpenConfigDriver]): + #resources_to_get = [] + #resources_to_get = [RESOURCE_ENDPOINTS] + #resources_to_get = [RESOURCE_INTERFACES] + #resources_to_get = [RESOURCE_NETWORK_INSTANCES] + #resources_to_get = [RESOURCE_ROUTING_POLICIES] + #resources_to_get = [RESOURCE_SERVICES] + #LOGGER.info('resources_to_get = {:s}'.format(str(resources_to_get))) + #results_getconfig = driver.GetConfig(resources_to_get) + #LOGGER.info('results_getconfig = {:s}'.format(str(results_getconfig))) + + csgw1_resources_to_set = [ + network_instance('ecoc24', 'L3VRF', '192.168.150.1', '65001:1'), + network_instance_add_protocol_direct('ecoc24', 'L3VRF'), + network_instance_add_protocol_static('ecoc24', 'L3VRF'), + network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.1', '65001', neighbors=[ + ('192.168.150.2', '65001') + ]), + network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), + network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), + + interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500), + network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), + interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.10.1', 24), enabled=True), + + interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500), + network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), + interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.1', 24), enabled=True), + ] + LOGGER.info('CSGW1 resources_to_set = {:s}'.format(str(csgw1_resources_to_set))) + results_setconfig = drivers['CSGW1'].SetConfig(csgw1_resources_to_set) + LOGGER.info('CSGW1 
results_setconfig = {:s}'.format(str(results_setconfig))) + + csgw2_resources_to_set = [ + network_instance('ecoc24', 'L3VRF', '192.168.150.2', '65001:1'), + network_instance_add_protocol_direct('ecoc24', 'L3VRF'), + network_instance_add_protocol_static('ecoc24', 'L3VRF'), + network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.2', '65001', neighbors=[ + ('192.168.150.1', '65001') + ]), + network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), + network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), + + interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500), + network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), + interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.20.1', 24), enabled=True), + + interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500), + network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), + interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.2', 24), enabled=True), + ] + LOGGER.info('CSGW2 resources_to_set = {:s}'.format(str(csgw2_resources_to_set))) + results_setconfig = drivers['CSGW2'].SetConfig(csgw2_resources_to_set) + LOGGER.info('CSGW2 results_setconfig = {:s}'.format(str(results_setconfig))) + + csgw1_resources_to_delete = [ + network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), + network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), + #interface('ce1', 0), + #interface('xe5', 0), + network_instance('ecoc24', 'L3VRF'), + ] + LOGGER.info('CSGW1 resources_to_delete = {:s}'.format(str(csgw1_resources_to_delete))) + results_deleteconfig = drivers['CSGW1'].DeleteConfig(csgw1_resources_to_delete) + LOGGER.info('CSGW1 results_deleteconfig = {:s}'.format(str(results_deleteconfig))) + + csgw2_resources_to_delete = [ + network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), + network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), + #interface('ce1', 0), + #interface('xe5', 0), + network_instance('ecoc24', 'L3VRF'), + ] + LOGGER.info('CSGW2 resources_to_delete = {:s}'.format(str(csgw2_resources_to_delete))) + results_deleteconfig = drivers['CSGW2'].DeleteConfig(csgw2_resources_to_delete) + LOGGER.info('CSGW2 results_deleteconfig = {:s}'.format(str(results_deleteconfig))) diff --git a/src/dlt/connector/client/DltConnectorClientAsync.py b/src/dlt/connector/client/DltConnectorClientAsync.py new file mode 100644 index 0000000000000000000000000000000000000000..b9ed8a4d6e1f3878ad148cac5529bd6908a03a24 --- /dev/null +++ b/src/dlt/connector/client/DltConnectorClientAsync.py @@ -0,0 +1,113 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
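+
+# Minimal usage sketch (assumes a running DLT connector gRPC service and an active
+# asyncio event loop; dlt_device_id is a DltDeviceId message built by the caller):
+#
+#   client = DltConnectorClientAsync()
+#   await client.connect()
+#   await client.RecordDevice(dlt_device_id)
+#   await client.close()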
+ +import grpc, logging, asyncio + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc +from common.proto.context_pb2 import Empty, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId +from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceStub +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class DltConnectorClientAsync: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.DLT) + if not port: port = get_service_port_grpc(ServiceNameEnum.DLT) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) + self.channel = None + self.stub = None + #self.connect() + #LOGGER.debug('Channel created') + + async def connect(self): + self.channel = grpc.aio.insecure_channel(self.endpoint) + self.stub = DltConnectorServiceStub(self.channel) + LOGGER.debug('Channel created') + + async def close(self): + if self.channel is not None: + await self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + async def RecordAll(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAll request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAll(request) + LOGGER.debug('RecordAll result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllDevices(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllDevices request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllDevices(request) + LOGGER.debug('RecordAllDevices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordDevice(self, request: DltDeviceId) -> Empty: + LOGGER.debug('RECORD_DEVICE request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordDevice(request) + LOGGER.debug('RecordDevice result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllLinks(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllLinks request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllLinks(request) + LOGGER.debug('RecordAllLinks result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordLink(self, request: DltLinkId) -> Empty: + LOGGER.debug('RecordLink request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordLink(request) + LOGGER.debug('RecordLink result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllServices(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllServices request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllServices(request) + LOGGER.debug('RecordAllServices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordService(self, 
request: DltServiceId) -> Empty: + LOGGER.debug('RecordService request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordService(request) + LOGGER.debug('RecordService result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordAllSlices(self, request: TopologyId) -> Empty: + LOGGER.debug('RecordAllSlices request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordAllSlices(request) + LOGGER.debug('RecordAllSlices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def RecordSlice(self, request: DltSliceId) -> Empty: + LOGGER.debug('RecordSlice request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordSlice(request) + LOGGER.debug('RecordSlice result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/dlt/connector/client/DltEventsCollector.py b/src/dlt/connector/client/DltEventsCollector.py index e59784a4d2902459d7bc88925e5b83a698770012..ce7d01480ba522a4006ca5d7b02943a157a10d29 100644 --- a/src/dlt/connector/client/DltEventsCollector.py +++ b/src/dlt/connector/client/DltEventsCollector.py @@ -19,7 +19,6 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from dlt.connector.client.DltGatewayClient import DltGatewayClient LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) # This class accepts an event_handler method as attribute that can be used to pre-process and # filter events before they reach the events_queue. Depending on the handler, the supported diff --git a/src/dlt/connector/client/DltGatewayClient.py b/src/dlt/connector/client/DltGatewayClient.py index 31ad4cca22d087f1a8d3d9ce573a6605adf3c1df..21d4df57dd5eaa72eddc147b83b6a66344e20f02 100644 --- a/src/dlt/connector/client/DltGatewayClient.py +++ b/src/dlt/connector/client/DltGatewayClient.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterator + import grpc, logging +from typing import Iterator from common.proto.context_pb2 import Empty, TeraFlowController from common.proto.dlt_gateway_pb2 import ( - DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription) + DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription +) from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string @@ -43,7 +45,8 @@ class DltGatewayClient: self.stub = DltGatewayServiceStub(self.channel) def close(self): - if self.channel is not None: self.channel.close() + if self.channel is not None: + self.channel.close() self.channel = None self.stub = None diff --git a/src/dlt/connector/client/DltGatewayClientAsync.py b/src/dlt/connector/client/DltGatewayClientAsync.py new file mode 100644 index 0000000000000000000000000000000000000000..816241ec587295ffd6a8ef24cca5c66942a849d5 --- /dev/null +++ b/src/dlt/connector/client/DltGatewayClientAsync.py @@ -0,0 +1,85 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio, grpc, logging +from typing import Iterator, List +from common.proto.context_pb2 import Empty, TeraFlowController +from common.proto.dlt_gateway_pb2 import ( + DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription +) +from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string +from dlt.connector.Config import DLT_GATEWAY_HOST, DLT_GATEWAY_PORT + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class DltGatewayClientAsync: + def __init__(self, host=None, port=None): + if not host: host = DLT_GATEWAY_HOST + if not port: port = DLT_GATEWAY_PORT + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) + self.channel = None + self.stub = None + self.message_queue: List[DltRecord] = [] + + async def connect(self): + self.channel = grpc.aio.insecure_channel(self.endpoint) + self.stub = DltGatewayServiceStub(self.channel) + LOGGER.debug('Channel created') + + async def close(self): + if self.channel is not None: + await self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + async def RecordToDlt(self, request : DltRecord) -> DltRecordStatus: + LOGGER.debug('RecordToDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.RecordToDlt(request) + LOGGER.debug('RecordToDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def GetFromDlt(self, request : DltRecordId) -> DltRecord: + LOGGER.debug('GetFromDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.GetFromDlt(request) + LOGGER.debug('GetFromDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SubscribeToDlt(self, request : DltRecordSubscription) -> Iterator[DltRecordEvent]: + LOGGER.debug('SubscribeToDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SubscribeToDlt(request) + LOGGER.debug('SubscribeToDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def GetDltStatus(self, request : TeraFlowController) -> DltPeerStatus: + LOGGER.debug('GetDltStatus request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.GetDltStatus(request) + LOGGER.debug('GetDltStatus result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + async def GetDltPeers(self, request : Empty) -> DltPeerStatusList: + LOGGER.debug('GetDltPeers request: {:s}'.format(grpc_message_to_json_string(request))) + response = await self.stub.GetDltPeers(request) + LOGGER.debug('GetDltPeers result: 
{:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/dlt/connector/service/DltConnectorService.py b/src/dlt/connector/service/DltConnectorService.py index 7e99cb8f85e519ec875a1decc7dc0ad1e030a6f4..601d3e70d298d8ae2a098339e71fc49d66fb79ad 100644 --- a/src/dlt/connector/service/DltConnectorService.py +++ b/src/dlt/connector/service/DltConnectorService.py @@ -14,15 +14,16 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc -from common.tools.service.GenericGrpcService import GenericGrpcService +from common.tools.service.GenericGrpcServiceAsync import GenericGrpcServiceAsync from common.proto.dlt_connector_pb2_grpc import add_DltConnectorServiceServicer_to_server from .DltConnectorServiceServicerImpl import DltConnectorServiceServicerImpl -class DltConnectorService(GenericGrpcService): +class DltConnectorService(GenericGrpcServiceAsync): def __init__(self, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.DLT) super().__init__(port, cls_name=cls_name) self.dltconnector_servicer = DltConnectorServiceServicerImpl() - def install_servicers(self): - add_DltConnectorServiceServicer_to_server(self.dltconnector_servicer, self.server) + async def install_servicers(self): + await self.dltconnector_servicer.initialize() + add_DltConnectorServiceServicer_to_server(self.dltconnector_servicer, self.server) \ No newline at end of file diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py index 42e86b102d3715d68f198d5e341a6f663f35ef4b..c1a4b7b1ea2f978e798d77bfe05cfa3f245283ce 100644 --- a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py +++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py @@ -12,16 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import grpc, logging
+import logging
+from grpc.aio import ServicerContext
 from typing import Optional
-from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method_async
 from common.proto.context_pb2 import Empty, TopologyId
 from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId
 from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer
 from common.proto.dlt_gateway_pb2 import DltRecord, DltRecordId, DltRecordOperationEnum, DltRecordTypeEnum
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
-from dlt.connector.client.DltGatewayClient import DltGatewayClient
+from dlt.connector.client.DltGatewayClientAsync import DltGatewayClientAsync
 from .tools.Checkers import record_exists
 
 LOGGER = logging.getLogger(__name__)
 
@@ -31,86 +32,91 @@ METRICS_POOL = MetricsPool('DltConnector', 'RPC')
 
 class DltConnectorServiceServicerImpl(DltConnectorServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
+        self.dltgateway_client = DltGatewayClientAsync()
         LOGGER.debug('Servicer Created')
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordAll(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+    async def initialize(self):
+        await self.dltgateway_client.connect()
+
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordAll(self, request : TopologyId, context : ServicerContext) -> Empty:
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordAllDevices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordAllDevices(self, request : TopologyId, context : ServicerContext) -> Empty:
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordDevice(self, request : DltDeviceId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordDevice(self, request : DltDeviceId, context : ServicerContext) -> Empty:
         data_json = None
-        if not request.delete:
+        LOGGER.debug('RECORD_DEVICE = {:s}'.format(grpc_message_to_json_string(request)))
+        if not request.delete:
             context_client = ContextClient()
             device = context_client.GetDevice(request.device_id)
             data_json = grpc_message_to_json_string(device)
 
-        self._record_entity(
+        await self._record_entity(
             request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_DEVICE,
             request.device_id.device_uuid.uuid, request.delete, data_json)
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordAllLinks(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordAllLinks(self, request : TopologyId, context : ServicerContext) -> Empty:
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordLink(self, request : DltLinkId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordLink(self, request : DltLinkId, context : ServicerContext) -> Empty:
         data_json = None
+        LOGGER.debug('RECORD_LINK = {:s}'.format(grpc_message_to_json_string(request)))
+
         if not request.delete:
             context_client = ContextClient()
             link = context_client.GetLink(request.link_id)
             data_json = grpc_message_to_json_string(link)
 
-        self._record_entity(
+        await self._record_entity(
            request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_LINK,
            request.link_id.link_uuid.uuid, request.delete, data_json)
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordAllServices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordAllServices(self, request : TopologyId, context : ServicerContext) -> Empty:
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordService(self, request : DltServiceId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordService(self, request : DltServiceId, context : ServicerContext) -> Empty:
         data_json = None
         if not request.delete:
             context_client = ContextClient()
             service = context_client.GetService(request.service_id)
             data_json = grpc_message_to_json_string(service)
 
-        self._record_entity(
+        await self._record_entity(
             request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_SERVICE,
             request.service_id.service_uuid.uuid, request.delete, data_json)
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordAllSlices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordAllSlices(self, request : TopologyId, context : ServicerContext) -> Empty:
         return Empty()
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RecordSlice(self, request : DltSliceId, context : grpc.ServicerContext) -> Empty:
+    @safe_and_metered_rpc_method_async(METRICS_POOL, LOGGER)
+    async def RecordSlice(self, request : DltSliceId, context : ServicerContext) -> Empty:
         data_json = None
         if not request.delete:
             context_client = ContextClient()
             slice_ = context_client.GetSlice(request.slice_id)
             data_json = grpc_message_to_json_string(slice_)
 
-        self._record_entity(
+        await self._record_entity(
             request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_SLICE,
             request.slice_id.slice_uuid.uuid, request.delete, data_json)
         return Empty()
 
-    def _record_entity(
+    async def _record_entity(
         self, dlt_domain_uuid : str, dlt_record_type : DltRecordTypeEnum, dlt_record_uuid : str, delete : bool,
         data_json : Optional[str] = None
     ) -> None:
-        dltgateway_client = DltGatewayClient()
-
         dlt_record_id = DltRecordId()
         dlt_record_id.domain_uuid.uuid = dlt_domain_uuid # pylint: disable=no-member
         dlt_record_id.type = dlt_record_type
@@ -118,7 +124,7 @@ class DltConnectorServiceServicerImpl(DltConnectorServiceServicer):
 
         str_dlt_record_id = grpc_message_to_json_string(dlt_record_id)
         LOGGER.debug('[_record_entity] sent dlt_record_id = {:s}'.format(str_dlt_record_id))
-        dlt_record = dltgateway_client.GetFromDlt(dlt_record_id)
+        dlt_record = await self.dltgateway_client.GetFromDlt(dlt_record_id)
         str_dlt_record = grpc_message_to_json_string(dlt_record)
         LOGGER.debug('[_record_entity] recv dlt_record = {:s}'.format(str_dlt_record))
@@ -131,17 +137,19 @@ class DltConnectorServiceServicerImpl(DltConnectorServiceServicer):
             dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_DELETE
         elif not delete and exists:
             dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE
-            if data_json is None: raise Exception('data_json must be provided when updating')
+            if data_json is None:
+                raise Exception('data_json must be provided when updating')
             dlt_record.data_json = data_json
         elif not delete and not exists:
             dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_ADD
-            if data_json is None: raise Exception('data_json must be provided when adding')
+            if data_json is None:
+                raise Exception('data_json must be provided when adding')
             dlt_record.data_json = data_json
         else:
             return
 
         str_dlt_record = grpc_message_to_json_string(dlt_record)
         LOGGER.debug('[_record_entity] sent dlt_record = {:s}'.format(str_dlt_record))
-        dlt_record_status = dltgateway_client.RecordToDlt(dlt_record)
+        dlt_record_status = await self.dltgateway_client.RecordToDlt(dlt_record)
         str_dlt_record_status = grpc_message_to_json_string(dlt_record_status)
         LOGGER.debug('[_record_entity] recv dlt_record_status = {:s}'.format(str_dlt_record_status))
diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py
index 5e0fb6f878dca4e244274632e8931bc25a4f1319..d1c9461247176f4f8d2849fb4cd594072d2c3a55 100644
--- a/src/dlt/connector/service/__main__.py
+++ b/src/dlt/connector/service/__main__.py
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, signal, sys, threading
+
+import logging, signal, threading, asyncio
+from typing import Optional
 from prometheus_client import start_http_server
 from common.Constants import ServiceNameEnum
 from common.Settings import (
@@ -22,13 +24,13 @@ from .event_dispatcher.DltEventDispatcher import DltEventDispatcher
 from .DltConnectorService import DltConnectorService
 
 terminate = threading.Event()
-LOGGER : logging.Logger = None
+LOGGER : Optional[logging.Logger] = None
 
 def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
     LOGGER.warning('Terminate signal received')
     terminate.set()
 
-def main():
+async def main():
     global LOGGER # pylint: disable=global-statement
 
     log_level = get_log_level()
@@ -55,13 +57,14 @@
 
     # Starting DLT connector service
     grpc_service = DltConnectorService()
-    grpc_service.start()
+    await grpc_service.start()
 
     # Wait for Ctrl+C or termination signal
-    while not terminate.wait(timeout=1.0): pass
+    while not terminate.is_set():
+        await asyncio.sleep(1.0)
 
     LOGGER.info('Terminating...')
-    grpc_service.stop()
+    await grpc_service.stop()
 
     event_dispatcher.stop()
     event_dispatcher.join()
@@ -69,4 +72,4 @@
     return 0
 
 if __name__ == '__main__':
-    sys.exit(main())
+    asyncio.run(main())
diff --git a/src/dlt/connector/service/tools/Checkers.py b/src/dlt/connector/service/tools/Checkers.py
index 6ad0f4b82626740c594829831b08fcefbc15096d..5b19afcd24171767c9b1130c8cb9d594b5afa5bf 100644
--- a/src/dlt/connector/service/tools/Checkers.py
+++ b/src/dlt/connector/service/tools/Checkers.py
@@ -20,5 +20,5 @@ def record_exists(record : DltRecord) -> bool:
     exists = exists and (record.record_id.type != DLTRECORDTYPE_UNDEFINED)
     exists = exists and (len(record.record_id.record_uuid.uuid) > 0)
     #exists = exists and (record.operation != DLTRECORDOPERATION_UNDEFINED)
-    exists = exists and (len(record.data_json) > 0)
+    #exists = exists and (len(record.data_json) > 0) # Disabled: it conflicts with records that legitimately carry no data_json.
     return exists
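The refactor above replaces the per-call, blocking `DltGatewayClient` with a single `DltGatewayClientAsync` that is
connected once and then awaited from every RPC handler. A minimal sketch of how the new servicer is driven, assuming
only the methods visible in the diff (`initialize()`, and the awaited `GetFromDlt`/`RecordToDlt` used by
`_record_entity`); module paths are assumed to follow the repository layout, and a reachable DLT gateway is required:

```python
# Illustrative sketch, not part of the commit: driving the async servicer.
import asyncio

from dlt.connector.service.DltConnectorServiceServicerImpl import (
    DltConnectorServiceServicerImpl)

async def demo() -> None:
    servicer = DltConnectorServiceServicerImpl()
    # Open the shared grpc.aio channel once, instead of creating one
    # blocking DltGatewayClient per RPC as before the refactor.
    await servicer.initialize()
    # RPC handlers are now coroutines and can be awaited concurrently, e.g.:
    #   await servicer.RecordDevice(dlt_device_id, context)

if __name__ == '__main__':
    asyncio.run(demo())
```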
diff --git a/src/dlt/connector/tests/basic.py b/src/dlt/connector/tests/basic.py
index da400dcbeed261578929262a6cc0bcc29dc5390e..08c5980889d66318b2221f6c62ff7918f05d1efc 100644
--- a/src/dlt/connector/tests/basic.py
+++ b/src/dlt/connector/tests/basic.py
@@ -17,12 +17,12 @@
 # PYTHONPATH=/home/cttc/teraflow/src python -m dlt.connector.tests.basic
 
 import logging, sys, time
+from common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device
 from common.proto.dlt_gateway_pb2 import (
     DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE,
     DLTRECORDTYPE_UNDEFINED, DltRecord, DltRecordId)
 from common.tools.object_factory.Device import json_device
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from src.common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device
 from ..client.DltGatewayClient import DltGatewayClient
 from ..client.DltEventsCollector import DltEventsCollector
diff --git a/src/dlt/gateway/Dockerfile b/src/dlt/gateway/Dockerfile
index 5b888b410360330cdf044c88f5832d1738d546e8..ace9cb22ee691efd7565120034a6a1ef36a27aaf 100644
--- a/src/dlt/gateway/Dockerfile
+++ b/src/dlt/gateway/Dockerfile
@@ -12,30 +12,34 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM zenika/kotlin:1.4-jdk12
+# Use an official Node.js runtime as a parent image
+FROM node:20
 
-# Make working directory move to it and copy DLT Gateway code
-RUN mkdir -p /var/teraflow/dlt/gateway
-WORKDIR /var/teraflow/dlt/gateway
-COPY src/dlt/gateway/. ./
+# Set the working directory in the container
+WORKDIR /usr/dltApp
 
-# Make directory for proto files and copy them
-RUN mkdir proto
-COPY proto/*.proto ./proto/
+# Create proto directory before copying the .proto files
+RUN mkdir -p ./proto
 
-# Build DLT Gateway
-RUN ./gradlew build
+# Copy package.json and package-lock.json
+COPY src/dlt/gateway/dltApp/package*.json ./
+# Copy tsconfig.json
+COPY src/dlt/gateway/dltApp/tsconfig*.json ./
+# Copy the proto folder contents
+COPY proto/acl.proto ./proto/acl.proto
+COPY proto/kpi_sample_types.proto ./proto/kpi_sample_types.proto
+COPY proto/context.proto ./proto/context.proto
+COPY proto/dlt_gateway.proto ./proto/dlt_gateway.proto
+
+# Copy the src folder
+COPY src/dlt/gateway/dltApp/src/ ./src
+
+# Install dependencies
+RUN npm install
+
+# Expose the port that the gRPC service runs on
 EXPOSE 50051
 
-# Create entrypoint.sh script
-RUN echo "#!/bin/sh" > /entrypoint.sh
-RUN echo "echo 195.37.154.24 peer0.org1.example.com >> /etc/hosts" >> /entrypoint.sh
-RUN echo "echo 195.37.154.24 peer0.org2.example.com >> /etc/hosts" >> /entrypoint.sh
-RUN echo "echo 195.37.154.24 orderer0.example.com >> /etc/hosts" >> /entrypoint.sh
-RUN echo "cd /var/teraflow/dlt/gateway" >> /entrypoint.sh
-RUN echo "./gradlew runServer" >> /entrypoint.sh
-RUN chmod +x /entrypoint.sh
-
-# Gateway entry point
-ENTRYPOINT ["sh", "/entrypoint.sh"]
+# Command to run the service
+CMD ["node", "src/dltGateway.js"]
diff --git a/src/dlt/gateway/README.md b/src/dlt/gateway/README.md
index 2cf6cfeb1682ade5a77f53fe13c96daed6dc33fd..4a38545eae7492e8ec5abac4fdf209da51b00aff 100644
--- a/src/dlt/gateway/README.md
+++ b/src/dlt/gateway/README.md
@@ -1,134 +1,35 @@
-```
-    NEC Laboratories Europe GmbH
-
-    PROPRIETARY INFORMATION
-
-    The software and its source code contain valuable trade secrets and
-    shall be maintained in confidence and treated as confidential
-    information. The software may only be used for evaluation and/or
-    testing purposes, unless otherwise explicitly stated in a written
-    agreement with NEC Laboratories Europe GmbH.
-
-    Any unauthorized publication, transfer to third parties or
-    duplication of the object or source code - either totally or in
-    part - is strictly prohibited.
-
-    Copyright (c) 2022 NEC Laboratories Europe GmbH
-    All Rights Reserved.
-
-    Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
-
-
-    NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES
-    OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE
-    WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND
-    THE ACCOMPANYING DOCUMENTATION.
-
-    NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC
-    Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR
-    ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR
-    LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
-    INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL,
-    INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF
-    OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe
-    GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-    THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
-    ```
+# DLT Gateway
 
-# DLT module guide
+## Description
 
-## General information
-The DLT module is used to provide access to the underlying Fabric deployment. It allows clients
-to add, retrieve, modify and delete blockchain-backed data, essentially working as a key-value
-database. External clients should use gRPC API to communicate with this service, its detailed
-description available below.
+The DLT Gateway consists of a **fabricConnect.ts** TypeScript file, which contains the logic for identity
+management (the certificates required for the MSP) and for connecting to the blockchain, and which exposes a
+contract object with all the information required for interacting with the chaincode. **fabricConnect.ts**
+follows the Fabric Gateway API recommendations for Hyperledger Fabric 2.4+. Its compiled logic is imported
+into a **dltGateway.js** file, which implements the gRPC interface towards the TFS controller. Code for
+various performance tests is included in the [/tests](./tests/) folder.
 
-## Code structure
-The whole DLT module consists of several packages:
-- fabric package
-- http package
-- proto package
-- client example
+The chaincode is written in Go, providing a reference for the operations that are recorded in the blockchain. This
+chaincode must already be deployed in a working Hyperledger Fabric blockchain.
 
-### Fabric package
-The most important class in this package is `FabricConnector`. First, it establishes connection
-with the underlying Fabric network using Java Gateway SDK. After that, it could be used as a
-CRUD interface.
-Other files contain auxiliary code for `FabricConnector` which allows it to register/enroll
-users and to obtain smart contract instances.
+## Requirements
 
-### Grpc package
-Contains server side gRPC handler. It accepts requests from the outside and performs the
-requested operation. For the more detailed description see Proto package description right below.
+* NodeJS
+* Docker
+* Kubernetes (K8s)
+
+The signing and TLS certificates, and the private key, of the MSP user from the Hyperledger Fabric deployment must be
+copied to the [/keys](./keys/) directory of this repository.
 
-### Proto package
-The proto package contains `dlt.proto` file which defines gRPC service `DltService` API and messages
-it uses. There are 3 main functions: `RecordToDlt` which allows to create/modify/delete data,
-`GetFromDlt` which returns already written data and `SubscribeToDlt` which allows clients subscribe
-for future create/modify/delete events with provided filters.
-Other proto files don't play any significant role and could be safely ignored by end users.
+Example:
 
-### Client example
-This code is not necessary to the service, but it could be used to test the service. It contains
-a sample gRPC client which connects the service and perform all the CRUD operations.
+```bash
+cp ~/fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/users/User1@org1.example.com/tls/ca.crt src/dlt/gateway/keys/
 
-# Fabric deployment notes
+cp ~/fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/users/User1@org1.example.com/msp/signcerts/User1@org1.example.com-cert.pem src/dlt/gateway/keys/cert.pem
 
-## General notes
-Current Fabric deployment uses Fabric test network with some additional helping scripts on top of it.
-To start the network just run the `raft.sh` from `blockchain/scripts` directory. Use `stop.sh`
-when you need to stop the network.
-
-## Server start preparations
-To run the server it's necessary to copy certificate file
-`fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/ca/ca.org1.example.com-cert.pem`
-to the config folder (replacing the existing one). Also, it's necessary to copy `scripts/connection-org1.json`
-file (again, replacing the old one). After copying, it must be edited. First, all `localhost` entrances
-should be replaced with `teraflow.nlehd.de`. Second, `channel` section at the end of the file should be removed.
-This should be done after every restart of the Fabric network.
-
-## Fabric configuration
-Even though a test network is easy to deploy and use it's better to perform a custom configuration
-for a production deployment. In practice every participating organization will likely prefer to have
-its own Peer/Orderer/CA instances to prevent possible dependency on any other participants. This leads
-not only to a better privacy/availability/security in general but also to the more complicated
-deployment process as a side effect. Here we provide a very brief description of the most important points.
-
-### Organizations
-Organization represents a network participant, which can be an individual, a large corporation or any other
-entity. Each organization has its own CAs, orderers and peers. The recommendation here is to create an
-organization entity for every independent participant and then decide how many CAs/peers/orderers does
-every organization need and which channels should it has access to based on the exact project's goals.
-
-### Channels
-Each channel represents an independent ledger with its own genesis block. Each transaction is executed
-on a specific channel, and it's possible to define which organization has access to a given channel.
-As a result channels are a pretty powerful privacy mechanism which allows to limit access to the private
-data between organization.
-
-### Certificate authorities, peers and orderers
-Certificate authorities (CA) are used to generate crypto materials for each organization. Two types of CA
-exist: one is used to generate the certificates of the admin, the MSP and certificates of non-admin users.
-Another type of CA is used to generate TLS certificates. As a result it's preferable to have at least two
-CAs for every organization.
-
-Peers are entities which host ledgers and smart contracts. They communicate with applications and orderers,
-receiving chaincode invocations (proposals), invoking chaincode, updating ledger when necessary and
-returning result of execution. Peers can handle one or many ledgers, depending on the configuration. It's
-very use case specific how many peers are necessary to the exact deployment.
-
-Orderers are used to execute a consensus in a distributing network making sure that every channel participant
-has the same blocks with the same data. The default consensus algorithm is Raft which provides only a crash
-fault tolerance.
-
-### Conclusion
-As you can see, configuration procedure for Fabric is pretty tricky and includes quite a lot of entities.
-In real world it will very likely involve participants from multiple organizations each of them performing
-its own part of configuration.
+cp ~/fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/users/User1@org1.example.com/msp/keystore/priv_sk src/dlt/gateway/keys/
+```
 
-As a further reading it's recommended to start with the
-[official deployment guide](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployment_guide_overview.html).
-It contains a high level overview of a deployment process as well as links to the detailed descriptions to
-CA/Peer/Orderer configuration descriptions.
\ No newline at end of file
+These files are essential for establishing the gateway's identity and a secure connection to the blockchain. Make sure
+you replace the paths with the actual file locations from your Hyperledger Fabric deployment.
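The gateway's gRPC surface (`RecordToDlt` and `GetFromDlt`, as used by the connector) is unchanged by the move from
Kotlin to Node.js, so the connector-side clients keep working. A hedged sketch of a write/read round-trip, reusing the
client and message types imported by `src/dlt/connector/tests/basic.py`; the UUIDs and payload are placeholders, and
the default `DltGatewayClient()` constructor is assumed to pick up the configured gateway address:

```python
# Illustrative sketch, not part of the commit.
from common.proto.dlt_gateway_pb2 import (
    DLTRECORDOPERATION_ADD, DLTRECORDTYPE_DEVICE, DltRecord)
from dlt.connector.client.DltGatewayClient import DltGatewayClient

client = DltGatewayClient()

record = DltRecord()
record.record_id.domain_uuid.uuid = 'example-domain-uuid'    # placeholder
record.record_id.type = DLTRECORDTYPE_DEVICE
record.record_id.record_uuid.uuid = 'example-device-uuid'    # placeholder
record.operation = DLTRECORDOPERATION_ADD
record.data_json = '{"name": "example-device"}'              # placeholder

record_status = client.RecordToDlt(record)           # write to the ledger...
stored_record = client.GetFromDlt(record.record_id)  # ...and read it back
```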
diff --git a/src/dlt/gateway/.gitignore b/src/dlt/gateway/_legacy/.gitignore
similarity index 100%
rename from src/dlt/gateway/.gitignore
rename to src/dlt/gateway/_legacy/.gitignore
diff --git a/src/dlt/gateway/_legacy/Dockerfile b/src/dlt/gateway/_legacy/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5b888b410360330cdf044c88f5832d1738d546e8
--- /dev/null
+++ b/src/dlt/gateway/_legacy/Dockerfile
@@ -0,0 +1,41 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#      http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM zenika/kotlin:1.4-jdk12
+
+# Make working directory move to it and copy DLT Gateway code
+RUN mkdir -p /var/teraflow/dlt/gateway
+WORKDIR /var/teraflow/dlt/gateway
+COPY src/dlt/gateway/. ./
+
+# Make directory for proto files and copy them
+RUN mkdir proto
+COPY proto/*.proto ./proto/
+
+# Build DLT Gateway
+RUN ./gradlew build
+
+EXPOSE 50051
+
+# Create entrypoint.sh script
+RUN echo "#!/bin/sh" > /entrypoint.sh
+RUN echo "echo 195.37.154.24 peer0.org1.example.com >> /etc/hosts" >> /entrypoint.sh
+RUN echo "echo 195.37.154.24 peer0.org2.example.com >> /etc/hosts" >> /entrypoint.sh
+RUN echo "echo 195.37.154.24 orderer0.example.com >> /etc/hosts" >> /entrypoint.sh
+RUN echo "cd /var/teraflow/dlt/gateway" >> /entrypoint.sh
+RUN echo "./gradlew runServer" >> /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+# Gateway entry point
+ENTRYPOINT ["sh", "/entrypoint.sh"]
diff --git a/src/dlt/gateway/_legacy/README.md b/src/dlt/gateway/_legacy/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2cf6cfeb1682ade5a77f53fe13c96daed6dc33fd
--- /dev/null
+++ b/src/dlt/gateway/_legacy/README.md
@@ -0,0 +1,134 @@
+```
+    NEC Laboratories Europe GmbH
+
+    PROPRIETARY INFORMATION
+
+    The software and its source code contain valuable trade secrets and
+    shall be maintained in confidence and treated as confidential
+    information. The software may only be used for evaluation and/or
+    testing purposes, unless otherwise explicitly stated in a written
+    agreement with NEC Laboratories Europe GmbH.
+
+    Any unauthorized publication, transfer to third parties or
+    duplication of the object or source code - either totally or in
+    part - is strictly prohibited.
+
+    Copyright (c) 2022 NEC Laboratories Europe GmbH
+    All Rights Reserved.
+
+    Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
+
+
+    NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER
+    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES
+    OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE
+    WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND
+    THE ACCOMPANYING DOCUMENTATION.
+
+    NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC
+    Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR
+    ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR
+    LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
+    INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL,
+    INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF
+    OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe
+    GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+    THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+    ```
+
+# DLT module guide
+
+## General information
+The DLT module is used to provide access to the underlying Fabric deployment. It allows clients
+to add, retrieve, modify and delete blockchain-backed data, essentially working as a key-value
+database. External clients should use gRPC API to communicate with this service, its detailed
+description available below.
+
+## Code structure
+The whole DLT module consists of several packages:
+- fabric package
+- http package
+- proto package
+- client example
+
+### Fabric package
+The most important class in this package is `FabricConnector`. First, it establishes connection
+with the underlying Fabric network using Java Gateway SDK. After that, it could be used as a
+CRUD interface.
+Other files contain auxiliary code for `FabricConnector` which allows it to register/enroll
+users and to obtain smart contract instances.
+
+### Grpc package
+Contains server side gRPC handler. It accepts requests from the outside and performs the
+requested operation. For the more detailed description see Proto package description right below.
+
+### Proto package
+The proto package contains `dlt.proto` file which defines gRPC service `DltService` API and messages
+it uses. There are 3 main functions: `RecordToDlt` which allows to create/modify/delete data,
+`GetFromDlt` which returns already written data and `SubscribeToDlt` which allows clients subscribe
+for future create/modify/delete events with provided filters.
+Other proto files don't play any significant role and could be safely ignored by end users.
+
+### Client example
+This code is not necessary to the service, but it could be used to test the service. It contains
+a sample gRPC client which connects the service and perform all the CRUD operations.
+
+# Fabric deployment notes
+
+## General notes
+Current Fabric deployment uses Fabric test network with some additional helping scripts on top of it.
+To start the network just run the `raft.sh` from `blockchain/scripts` directory. Use `stop.sh`
+when you need to stop the network.
+
+## Server start preparations
+To run the server it's necessary to copy certificate file
+`fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/ca/ca.org1.example.com-cert.pem`
+to the config folder (replacing the existing one). Also, it's necessary to copy `scripts/connection-org1.json`
+file (again, replacing the old one). After copying, it must be edited. First, all `localhost` entrances
+should be replaced with `teraflow.nlehd.de`. Second, `channel` section at the end of the file should be removed.
+This should be done after every restart of the Fabric network.
+
+## Fabric configuration
+Even though a test network is easy to deploy and use it's better to perform a custom configuration
+for a production deployment. In practice every participating organization will likely prefer to have
+its own Peer/Orderer/CA instances to prevent possible dependency on any other participants. This leads
+not only to a better privacy/availability/security in general but also to the more complicated
+deployment process as a side effect. Here we provide a very brief description of the most important points.
+
+### Organizations
+Organization represents a network participant, which can be an individual, a large corporation or any other
+entity. Each organization has its own CAs, orderers and peers. The recommendation here is to create an
+organization entity for every independent participant and then decide how many CAs/peers/orderers does
+every organization need and which channels should it has access to based on the exact project's goals.
+
+### Channels
+Each channel represents an independent ledger with its own genesis block. Each transaction is executed
+on a specific channel, and it's possible to define which organization has access to a given channel.
+As a result channels are a pretty powerful privacy mechanism which allows to limit access to the private
+data between organization.
+
+### Certificate authorities, peers and orderers
+Certificate authorities (CA) are used to generate crypto materials for each organization. Two types of CA
+exist: one is used to generate the certificates of the admin, the MSP and certificates of non-admin users.
+Another type of CA is used to generate TLS certificates. As a result it's preferable to have at least two
+CAs for every organization.
+
+Peers are entities which host ledgers and smart contracts. They communicate with applications and orderers,
+receiving chaincode invocations (proposals), invoking chaincode, updating ledger when necessary and
+returning result of execution. Peers can handle one or many ledgers, depending on the configuration. It's
+very use case specific how many peers are necessary to the exact deployment.
+
+Orderers are used to execute a consensus in a distributing network making sure that every channel participant
+has the same blocks with the same data. The default consensus algorithm is Raft which provides only a crash
+fault tolerance.
+
+### Conclusion
+As you can see, configuration procedure for Fabric is pretty tricky and includes quite a lot of entities.
+In real world it will very likely involve participants from multiple organizations each of them performing
+its own part of configuration.
+
+As a further reading it's recommended to start with the
+[official deployment guide](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployment_guide_overview.html).
+It contains a high level overview of a deployment process as well as links to the detailed descriptions to
+CA/Peer/Orderer configuration descriptions.
\ No newline at end of file
diff --git a/src/dlt/gateway/build.gradle.kts b/src/dlt/gateway/_legacy/build.gradle.kts
similarity index 100%
rename from src/dlt/gateway/build.gradle.kts
rename to src/dlt/gateway/_legacy/build.gradle.kts
diff --git a/src/dlt/gateway/config/ca.org1.example.com-cert.pem b/src/dlt/gateway/_legacy/config/ca.org1.example.com-cert.pem
similarity index 100%
rename from src/dlt/gateway/config/ca.org1.example.com-cert.pem
rename to src/dlt/gateway/_legacy/config/ca.org1.example.com-cert.pem
diff --git a/src/dlt/gateway/config/connection-org1.json b/src/dlt/gateway/_legacy/config/connection-org1.json
similarity index 100%
rename from src/dlt/gateway/config/connection-org1.json
rename to src/dlt/gateway/_legacy/config/connection-org1.json
diff --git a/src/dlt/gateway/gradle.properties b/src/dlt/gateway/_legacy/gradle.properties
similarity index 100%
rename from src/dlt/gateway/gradle.properties
rename to src/dlt/gateway/_legacy/gradle.properties
diff --git a/src/dlt/gateway/gradle/wrapper/gradle-wrapper.jar b/src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.jar
similarity index 100%
rename from src/dlt/gateway/gradle/wrapper/gradle-wrapper.jar
rename to src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.jar
diff --git a/src/dlt/gateway/gradle/wrapper/gradle-wrapper.properties b/src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.properties
similarity index 100%
rename from src/dlt/gateway/gradle/wrapper/gradle-wrapper.properties
rename to src/dlt/gateway/_legacy/gradle/wrapper/gradle-wrapper.properties
diff --git a/src/dlt/gateway/gradlew b/src/dlt/gateway/_legacy/gradlew
similarity index 100%
rename from src/dlt/gateway/gradlew
rename to src/dlt/gateway/_legacy/gradlew
diff --git a/src/dlt/gateway/gradlew.bat b/src/dlt/gateway/_legacy/gradlew.bat
similarity index 100%
rename from src/dlt/gateway/gradlew.bat
rename to src/dlt/gateway/_legacy/gradlew.bat
diff --git a/src/dlt/gateway/settings.gradle.kts b/src/dlt/gateway/_legacy/settings.gradle.kts
similarity index 88%
rename from src/dlt/gateway/settings.gradle.kts
rename to src/dlt/gateway/_legacy/settings.gradle.kts
index 77fa0f0b22918cf306f0e5f07506a35e492142b4..6500a488a10c31fba79da633993989e5a7e7ec40 100644
--- a/src/dlt/gateway/settings.gradle.kts
+++ b/src/dlt/gateway/_legacy/settings.gradle.kts
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ * Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/src/dlt/gateway/src/main/kotlin/Main.kt b/src/dlt/gateway/_legacy/src/main/kotlin/Main.kt
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/Main.kt
rename to src/dlt/gateway/_legacy/src/main/kotlin/Main.kt
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/ConnectGateway.kt
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt
rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/ConnectGateway.kt
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/EnrollAdmin.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/EnrollAdmin.kt
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/fabric/EnrollAdmin.kt
rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/EnrollAdmin.kt
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/FabricConnector.kt
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt
rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/FabricConnector.kt
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/RegisterUser.kt b/src/dlt/gateway/_legacy/src/main/kotlin/fabric/RegisterUser.kt
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/fabric/RegisterUser.kt
rename to src/dlt/gateway/_legacy/src/main/kotlin/fabric/RegisterUser.kt
diff --git a/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt b/src/dlt/gateway/_legacy/src/main/kotlin/grpc/FabricServer.kt
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt
rename to src/dlt/gateway/_legacy/src/main/kotlin/grpc/FabricServer.kt
diff --git a/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt b/src/dlt/gateway/_legacy/src/main/kotlin/grpc/GrpcHandler.kt
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt
rename to src/dlt/gateway/_legacy/src/main/kotlin/grpc/GrpcHandler.kt
diff --git a/src/dlt/gateway/src/main/kotlin/proto/Config.proto b/src/dlt/gateway/_legacy/src/main/kotlin/proto/Config.proto
similarity index 100%
rename from src/dlt/gateway/src/main/kotlin/proto/Config.proto
rename to src/dlt/gateway/_legacy/src/main/kotlin/proto/Config.proto
diff --git a/src/dlt/gateway/chaincode/chaincode.go b/src/dlt/gateway/chaincode/chaincode.go
new file mode 100644
index 0000000000000000000000000000000000000000..878cbe9423a7d2cac31e1857343dcdc61034180d
--- /dev/null
+++ b/src/dlt/gateway/chaincode/chaincode.go
@@ -0,0 +1,200 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//      http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+    "crypto/sha256"
+    "encoding/hex"
+    "encoding/json"
+    "fmt"
+    "log"
+
+    "github.com/hyperledger/fabric-contract-api-go/contractapi"
+)
+
+// Go mirrors of the protobuf messages, serialized as JSON
+type Uuid struct {
+    Uuid string `json:"uuid"`
+}
+
+type DltRecordId struct {
+    DomainUuid Uuid   `json:"domain_uuid"`
+    Type       string `json:"type"`
+    RecordUuid Uuid   `json:"record_uuid"`
+}
+
+type DltRecord struct {
+    RecordId DltRecordId `json:"record_id"`
+    DataJson string      `json:"data_json"`
+}
+
+type SmartContract struct {
+    contractapi.Contract
+}
+
+// InitLedger activates the chaincode; the ledger needs no initial state
+func (s *SmartContract) InitLedger(ctx contractapi.TransactionContextInterface) error {
+    return nil
+}
+
+func (s *SmartContract) StoreRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId, dataJson string) error {
+
+    key, err := createHashKey(recordId)
+    if err != nil {
+        return fmt.Errorf("failed to create hash key: %v", err)
+    }
+
+    // Reject the write if a record with the same key already exists
+    exists, err := s.RecordExists(ctx, key)
+    if err == nil && exists != nil {
+        return fmt.Errorf("the record %s already exists", key)
+    }
+
+    // Emit an event; it is delivered to subscribers only if the transaction commits
+    storedRecord := DltRecord{
+        RecordId: recordId,
+        DataJson: dataJson,
+    }
+    eventJson, err := json.Marshal(storedRecord)
+    if err != nil {
+        return fmt.Errorf("failed to marshal stored record: %v", err)
+    }
+    ctx.GetStub().SetEvent("StoreRecord", eventJson)
+
+    // Store the record in the ledger
+    return ctx.GetStub().PutState(key, []byte(dataJson))
+}
+
+func (s *SmartContract) RetrieveRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId) (string, error) {
+    key, err := createHashKey(recordId)
+    if err != nil {
+        return "", fmt.Errorf("failed to create hash key: %v", err)
+    }
+
+    // Get the record from the ledger
+    dataBytes, err := ctx.GetStub().GetState(key)
+    if err != nil || dataBytes == nil {
+        return "", fmt.Errorf("data not found for key %s", key)
+    }
+    return string(dataBytes), nil
+}
+
+func (s *SmartContract) UpdateRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId, dataJson string) error {
+    key, err := createHashKey(recordId)
+    if err != nil {
+        return fmt.Errorf("failed to create hash key: %v", err)
+    }
+
+    // Check that the record exists before updating it
+    _, err = s.RecordExists(ctx, key)
+    if err != nil {
+        return err
+    }
+
+    // Emit an event; it is delivered to subscribers only if the transaction commits
+    eventData := DltRecord{RecordId: recordId, DataJson: dataJson}
+    eventJson, err := json.Marshal(eventData)
+    if err != nil {
+        return fmt.Errorf("failed to marshal event data: %v", err)
+    }
+    ctx.GetStub().SetEvent("UpdateRecord", eventJson)
+
+    // Update the record in the ledger
+    return ctx.GetStub().PutState(key, []byte(dataJson))
+}
+
+func (s *SmartContract) DeleteRecord(ctx contractapi.TransactionContextInterface, recordId DltRecordId) error {
+    key, err := createHashKey(recordId)
+    if err != nil {
+        return fmt.Errorf("failed to create hash key: %v", err)
+    }
+
+    // Check that the record exists before deleting it
+    exists, err := s.RecordExists(ctx, key)
+    if err != nil {
+        return err
+    }
+
+    // Emit an event carrying the last stored payload; delivered only on commit
+    eventData := DltRecord{RecordId: recordId, DataJson: string(exists)}
+    eventJson, err := json.Marshal(eventData)
+    if err != nil {
+        return fmt.Errorf("failed to marshal event data: %v", err)
+    }
+    ctx.GetStub().SetEvent("DeleteRecord", eventJson)
+
+    // Delete the record from the ledger
+    return ctx.GetStub().DelState(key)
+}
+
+// RecordExists doubles as a getter: a nil error means "found", and the returned
+// bytes hold the current payload (see the DeleteRecord event above)
+func (s *SmartContract) RecordExists(ctx contractapi.TransactionContextInterface, key string) ([]byte, error) {
+    jsonData, err := ctx.GetStub().GetState(key)
+    if err != nil {
+        return nil, fmt.Errorf("failed to read from world state: %v", err)
+    }
+    if jsonData == nil {
+        return nil, fmt.Errorf("the record %s does not exist", key)
+    }
+    return jsonData, nil
+}
+
+func (s *SmartContract) GetAllRecords(ctx contractapi.TransactionContextInterface) ([]map[string]interface{}, error) {
+    // Range query with empty string for startKey and endKey does an
+    // open-ended query of all records in the chaincode namespace.
+    resultsIterator, err := ctx.GetStub().GetStateByRange("", "")
+    if err != nil {
+        return nil, err
+    }
+    defer resultsIterator.Close()
+
+    var records []map[string]interface{}
+    for resultsIterator.HasNext() {
+        queryResponse, err := resultsIterator.Next()
+        if err != nil {
+            return nil, err
+        }
+
+        var generic map[string]interface{}
+        if err := json.Unmarshal(queryResponse.Value, &generic); err != nil {
+            return nil, fmt.Errorf("invalid JSON data: %v", err)
+        }
+
+        records = append(records, generic)
+    }
+
+    return records, nil
+}
+
+func createHashKey(recordId DltRecordId) (string, error) {
+    recordIdJson, err := json.Marshal(recordId)
+    if err != nil {
+        return "", fmt.Errorf("failed to marshal record ID: %v", err)
+    }
+
+    hash := sha256.New()
+    hash.Write(recordIdJson)
+    return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+func main() {
+    recordChaincode, err := contractapi.NewChaincode(&SmartContract{})
+    if err != nil {
+        log.Panicf("Error creating chaincode: %v", err)
+    }
+
+    if err := recordChaincode.Start(); err != nil {
+        log.Panicf("Error starting chaincode: %v", err)
+    }
+}
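`createHashKey` above derives the ledger key as the SHA-256 of the JSON-marshalled `DltRecordId`. A sketch of the
equivalent derivation on the client side, e.g. for correlating gateway records with ledger state; byte-for-byte
agreement with Go's `json.Marshal` depends on matching its field order and compact separators, and the `type` string
value is a placeholder:

```python
# Illustrative sketch, not part of the commit: mirrors the chaincode's
# createHashKey() under the assumptions stated above.
import hashlib, json

def create_hash_key(domain_uuid : str, record_type : str, record_uuid : str) -> str:
    # Field order matches the Go DltRecordId struct; compact separators
    # match Go's json.Marshal encoding.
    record_id = {
        'domain_uuid': {'uuid': domain_uuid},
        'type'       : record_type,
        'record_uuid': {'uuid': record_uuid},
    }
    record_id_json = json.dumps(record_id, separators=(',', ':'))
    return hashlib.sha256(record_id_json.encode('utf-8')).hexdigest()

# Example (placeholder values):
print(create_hash_key('example-domain-uuid', 'device', 'example-device-uuid'))
```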
diff --git a/src/dlt/gateway/chaincode/go.mod b/src/dlt/gateway/chaincode/go.mod
new file mode 100644
index 0000000000000000000000000000000000000000..27f86fbabd3d9b169f485ab22ccc42125421afa6
--- /dev/null
+++ b/src/dlt/gateway/chaincode/go.mod
@@ -0,0 +1,45 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+module chaincode
+
+go 1.17
+
+require github.com/hyperledger/fabric-contract-api-go v1.2.1
+
+require (
+    github.com/go-openapi/jsonpointer v0.19.5 // indirect
+    github.com/go-openapi/jsonreference v0.20.0 // indirect
+    github.com/go-openapi/spec v0.20.8 // indirect
+    github.com/go-openapi/swag v0.21.1 // indirect
+    github.com/gobuffalo/envy v1.10.1 // indirect
+    github.com/gobuffalo/packd v1.0.1 // indirect
+    github.com/gobuffalo/packr v1.30.1 // indirect
+    github.com/golang/protobuf v1.5.2 // indirect
+    github.com/hyperledger/fabric-chaincode-go v0.0.0-20230228194215-b84622ba6a7a // indirect
+    github.com/hyperledger/fabric-protos-go v0.3.0 // indirect
+    github.com/joho/godotenv v1.4.0 // indirect
+    github.com/josharian/intern v1.0.0 // indirect
+    github.com/mailru/easyjson v0.7.7 // indirect
+    github.com/rogpeppe/go-internal v1.8.1 // indirect
+    github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+    github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+    golang.org/x/net v0.7.0 // indirect
+    golang.org/x/sys v0.5.0 // indirect
+    golang.org/x/text v0.7.0 // indirect
+    google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
+    google.golang.org/grpc v1.53.0 // indirect
+    google.golang.org/protobuf v1.28.1 // indirect
+    gopkg.in/yaml.v2 v2.4.0 // indirect
+)
diff --git a/src/dlt/gateway/chaincode/go.sum b/src/dlt/gateway/chaincode/go.sum
new file mode 100644
index 0000000000000000000000000000000000000000..8bb9581b596c7f9dd4528d14f5051aa03132421f
--- /dev/null
+++ b/src/dlt/gateway/chaincode/go.sum
@@ -0,0 +1,1240 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go
v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= 
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod 
h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= 
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= 
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cucumber/gherkin-go/v19 v19.0.3/go.mod h1:jY/NP6jUtRSArQQJ5h1FXOUgk5fZK24qtE7vKi776Vw=
+github.com/cucumber/godog v0.12.6/go.mod h1:Y02TTpimPXDb70PnG6M3zpODXm1+bjCsuZzcW76xAww=
+github.com/cucumber/messages-go/v16 v16.0.0/go.mod h1:EJcyR5Mm5ZuDsKJnT2N9KRnBK30BGjtYotDKpwQ0v6g=
+github.com/cucumber/messages-go/v16 v16.0.1/go.mod h1:EJcyR5Mm5ZuDsKJnT2N9KRnBK30BGjtYotDKpwQ0v6g=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU=
+github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.10.1 h1:ppDLoXv2feQ5nus4IcgtyMdHQkKng2lhJCIm33cblM0=
+github.com/gobuffalo/envy v1.10.1/go.mod h1:AWx4++KnNOW3JOeEvhSaq+mvgAvnMYOY1XSIin4Mago=
+github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
+github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
+github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0=
+github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY=
+github.com/gobuffalo/packr v1.30.1 h1:hu1fuVR3fXEZR7rXNW3h8rqSML8EVAf6KNm0NKO/wKg=
+github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk=
+github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g=
+github.com/hashicorp/go-memdb v1.3.3/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hyperledger/fabric-chaincode-go v0.0.0-20230228194215-b84622ba6a7a h1:HwSCxEeiBthwcazcAykGATQ36oG9M+HEQvGLvB7aLvA=
+github.com/hyperledger/fabric-chaincode-go v0.0.0-20230228194215-b84622ba6a7a/go.mod h1:TDSu9gxURldEnaGSFbH1eMlfSQBWQcMQfnDBcpQv5lU=
+github.com/hyperledger/fabric-contract-api-go v1.2.1 h1:Ww9cKH/qHl5s6WqF+Ts5ju5eaBxC/awB/BJE+rOsEkM=
+github.com/hyperledger/fabric-contract-api-go v1.2.1/go.mod h1:BhWve0gz1iH+Xc+cO3rmeIZI7YaTWOQodka9CgeUOgo=
+github.com/hyperledger/fabric-protos-go v0.3.0 h1:MXxy44WTMENOh5TI8+PCK2x6pMj47Go2vFRKDHB2PZs=
+github.com/hyperledger/fabric-protos-go v0.3.0/go.mod h1:WWnyWP40P2roPmmvxsUXSvVI/CF6vwY1K1UFidnKBys=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg=
+github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys
v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= 
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api 
v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod 
h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod 
h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/src/dlt/gateway/dltApp/package-lock.json b/src/dlt/gateway/dltApp/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..6fb245621a1e38f84d6806aae2f94fc83394b05a
--- /dev/null
+++ b/src/dlt/gateway/dltApp/package-lock.json
@@ -0,0 +1,2420 @@
+{
+  "name": "dlt_gateway",
+  "version": "1.0.0",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "dlt_gateway",
+      "version": "1.0.0",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@grpc/grpc-js": "^1.10.8",
+        "@grpc/proto-loader": "^0.7.13",
+        "@hyperledger/fabric-gateway": "~1.4.0",
+        "dotenv": "^16.4.5",
+        "grpc-tools": "^1.12.4",
+        "protobufjs": "^7.3.0",
+        "uuid": "^9.0.1"
+      },
+      "devDependencies": {
+        "@tsconfig/node18": "^18.2.2",
+        "@types/node": "^18.18.6",
+        "@typescript-eslint/eslint-plugin": "^6.9.0",
+        "@typescript-eslint/parser": "^6.9.0",
+        "eslint": "^8.52.0",
+        "typescript": "~5.2.2"
+      },
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@eslint-community/eslint-utils": {
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz",
+      "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==",
+      "dev": true,
+      "dependencies": {
+        "eslint-visitor-keys": "^3.3.0"
+      },
+      "engines": {
+        "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+      },
+      "peerDependencies": {
+        "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0"
+      }
+    },
+    "node_modules/@eslint-community/regexpp": {
+      "version": "4.10.0",
+      "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz",
+      "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==",
+      "dev": true,
+      "engines": {
+        "node": "^12.0.0 || ^14.0.0 || >=16.0.0"
+      }
+    },
+    "node_modules/@eslint/eslintrc": {
+      "version": "2.1.4",
+      "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz",
+      "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==",
+      "dev": true,
+      "dependencies": {
+        "ajv": "^6.12.4",
+        "debug": "^4.3.2",
+        "espree": "^9.6.0",
+        "globals": "^13.19.0",
+        "ignore": "^5.2.0",
+        "import-fresh": "^3.2.1",
+        "js-yaml": "^4.1.0",
+        "minimatch": "^3.1.2",
+        "strip-json-comments": "^3.1.1"
+      },
+      "engines": {
+        "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/eslint"
+      }
+    },
+    "node_modules/@eslint/eslintrc/node_modules/brace-expansion": {
+      "version": "1.1.11",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+      "dev": true,
+      "dependencies": {
+        "balanced-match": "^1.0.0",
+        "concat-map": "0.0.1"
+      }
+    },
+    "node_modules/@eslint/eslintrc/node_modules/minimatch": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+      "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+      "dev": true,
+      "dependencies": {
+        "brace-expansion": "^1.1.7"
+      },
+      "engines": {
+        "node": "*"
+      }
+    },
+    "node_modules/@eslint/js": {
+      "version": "8.57.0",
+      "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz",
+      "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==",
+      "dev": true,
+      "engines": {
+        "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+      }
+    },
+    "node_modules/@grpc/grpc-js": {
+      "version": "1.10.8",
+      "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.10.8.tgz",
+      "integrity": "sha512-vYVqYzHicDqyKB+NQhAc54I1QWCBLCrYG6unqOIcBTHx+7x8C9lcoLj3KVJXs2VB4lUbpWY+Kk9NipcbXYWmvg==",
+      "dependencies": {
+        "@grpc/proto-loader": "^0.7.13",
+        "@js-sdsl/ordered-map": "^4.4.2"
+      },
+      "engines": {
+        "node": ">=12.10.0"
+      }
+    },
+    "node_modules/@grpc/proto-loader": {
+      "version": "0.7.13",
+      "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.13.tgz",
+      "integrity": "sha512-AiXO/bfe9bmxBjxxtYxFAXGZvMaN5s8kO+jBHAJCON8rJoB5YS/D6X7ZNc6XQkuHNmyl4CYaMI1fJ/Gn27RGGw==",
+      "dependencies": {
+        "lodash.camelcase": "^4.3.0",
+        "long": "^5.0.0",
+        "protobufjs": "^7.2.5",
+        "yargs": "^17.7.2"
+      },
+      "bin": {
+        "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js"
+      },
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/@humanwhocodes/config-array": {
+      "version": "0.11.14",
+      "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",
+      "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==",
+      "dev": true,
+      "dependencies": {
+        "@humanwhocodes/object-schema": "^2.0.2",
+        "debug": "^4.3.1",
+        "minimatch": "^3.0.5"
+      },
+      "engines": {
+        "node": ">=10.10.0"
+      }
+    },
+    "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": {
+      "version": "1.1.11",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+      "dev": true,
+      "dependencies": {
+        "balanced-match": "^1.0.0",
+        "concat-map": "0.0.1"
+      }
+    },
+    "node_modules/@humanwhocodes/config-array/node_modules/minimatch": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+      "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+      "dev": true,
+      "dependencies": {
+        "brace-expansion": "^1.1.7"
+      },
+      "engines": {
+        "node": "*"
+      }
+    },
+    "node_modules/@humanwhocodes/module-importer": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
+      "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
+      "dev": true,
+      "engines": {
+        "node": ">=12.22"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/nzakas"
+      }
+    },
+    "node_modules/@humanwhocodes/object-schema": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz",
+      "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==",
+      "dev": true
+    },
+    "node_modules/@hyperledger/fabric-gateway": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/@hyperledger/fabric-gateway/-/fabric-gateway-1.4.0.tgz",
+      "integrity": "sha512-dJ0eJdGBo8wtZ/oR5mADHnllp+pSuVOI7uq5fRFf0NTVk1SzlX42Q3kt4j53bJQaxd21TMvofgXNO+BCgJcB/A==",
+      "dependencies": {
+        "@grpc/grpc-js": "^1.9.0",
+        "@hyperledger/fabric-protos": "^0.2.0",
+        "asn1.js": "^5.4.1",
+        "bn.js": "^5.2.1",
+        "elliptic": "^6.5.4",
+        "google-protobuf": "^3.21.0"
+      },
+      "engines": {
+        "node": ">=18.12.0"
+      },
+      "optionalDependencies": {
+        "pkcs11js": "^1.3.0"
+      }
+    },
+    "node_modules/@hyperledger/fabric-protos": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/@hyperledger/fabric-protos/-/fabric-protos-0.2.1.tgz",
+      "integrity": "sha512-qjm0vIQIfCall804tWDeA8p/mUfu14sl5Sj+PbOn2yDKJq+7ThoIhNsLAqf+BCxUfqsoqQq6AojhqQeTFyOOqg==",
+      "dependencies": {
+        "@grpc/grpc-js": "^1.9.0",
+        "google-protobuf": "^3.21.0"
+      },
+      "engines": {
+        "node": ">=14.15.0"
+      }
+    },
+    "node_modules/@js-sdsl/ordered-map": {
+      "version": "4.4.2",
+      "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz",
+      "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==",
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/js-sdsl"
+      }
+    },
+    "node_modules/@mapbox/node-pre-gyp": {
+      "version": "1.0.11",
+      "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz",
+      "integrity": "sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==",
+      "dependencies": {
+        "detect-libc": "^2.0.0",
+        "https-proxy-agent": "^5.0.0",
+        "make-dir": "^3.1.0",
+        "node-fetch": "^2.6.7",
+        "nopt": "^5.0.0",
+        "npmlog": "^5.0.1",
+        "rimraf": "^3.0.2",
+        "semver": "^7.3.5",
+        "tar": "^6.1.11"
+      },
+      "bin": {
+        "node-pre-gyp": "bin/node-pre-gyp"
+      }
+    },
+    "node_modules/@nodelib/fs.scandir": {
+      "version": "2.1.5",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+      "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+      "dev": true,
+      "dependencies": {
+        "@nodelib/fs.stat": "2.0.5",
+        "run-parallel": "^1.1.9"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@nodelib/fs.stat": {
+      "version": "2.0.5",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+      "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+      "dev": true,
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@nodelib/fs.walk": {
+      "version": "1.2.8",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+      "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+      "dev": true,
+      "dependencies": {
+        "@nodelib/fs.scandir": "2.1.5",
+        "fastq": "^1.6.0"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@protobufjs/aspromise": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
+      "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ=="
+    },
+    "node_modules/@protobufjs/base64": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
+      "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="
+    },
+    "node_modules/@protobufjs/codegen": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
+      "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="
+    },
+    "node_modules/@protobufjs/eventemitter": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
+      "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q=="
+    },
+    "node_modules/@protobufjs/fetch": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
+      "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
+      "dependencies": {
+        "@protobufjs/aspromise": "^1.1.1",
+        "@protobufjs/inquire": "^1.1.0"
+      }
+    },
+    "node_modules/@protobufjs/float": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
+      "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ=="
+    },
+    "node_modules/@protobufjs/inquire": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
+      "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q=="
+    },
+    "node_modules/@protobufjs/path": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
+      "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA=="
+    },
+    "node_modules/@protobufjs/pool": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
+      "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw=="
+    },
+    "node_modules/@protobufjs/utf8": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
+      "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw=="
+    },
+    "node_modules/@tsconfig/node18": {
+      "version": "18.2.4",
+      "resolved": "https://registry.npmjs.org/@tsconfig/node18/-/node18-18.2.4.tgz",
+      "integrity": "sha512-5xxU8vVs9/FNcvm3gE07fPbn9tl6tqGGWA9tSlwsUEkBxtRnTsNmwrV8gasZ9F/EobaSv9+nu8AxUKccw77JpQ==",
+      "dev": true
+    },
+    "node_modules/@types/json-schema": {
+      "version": "7.0.15",
+      "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
+      "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
+      "dev": true
+    },
+    "node_modules/@types/node": {
+      "version": "18.19.33",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.33.tgz",
+      "integrity": "sha512-NR9+KrpSajr2qBVp/Yt5TU/rp+b5Mayi3+OlMlcg2cVCfRmcG5PWZ7S4+MG9PZ5gWBoc9Pd0BKSRViuBCRPu0A==",
+      "dependencies": {
+        "undici-types": "~5.26.4"
+      }
+    },
+    "node_modules/@types/semver": {
+      "version": "7.5.8",
+      "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz",
+      "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==",
+      "dev": true
+    },
+    "node_modules/@typescript-eslint/eslint-plugin": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz",
+      "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==",
+      "dev": true,
+      "dependencies": {
+        "@eslint-community/regexpp": "^4.5.1",
+        "@typescript-eslint/scope-manager": "6.21.0",
+        "@typescript-eslint/type-utils": "6.21.0",
+        "@typescript-eslint/utils": "6.21.0",
+        "@typescript-eslint/visitor-keys": "6.21.0",
+        "debug": "^4.3.4",
+        "graphemer": "^1.4.0",
+        "ignore": "^5.2.4",
+        "natural-compare": "^1.4.0",
+        "semver": "^7.5.4",
+        "ts-api-utils": "^1.0.1"
+      },
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha",
+        "eslint": "^7.0.0 || ^8.0.0"
+      },
+      "peerDependenciesMeta": {
+        "typescript": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/@typescript-eslint/parser": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz",
+      "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==",
+      "dev": true,
+      "dependencies": {
+        "@typescript-eslint/scope-manager": "6.21.0",
+        "@typescript-eslint/types": "6.21.0",
+        "@typescript-eslint/typescript-estree": "6.21.0",
+        "@typescript-eslint/visitor-keys": "6.21.0",
+        "debug": "^4.3.4"
+      },
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^7.0.0 || ^8.0.0"
+      },
+      "peerDependenciesMeta": {
+        "typescript": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/@typescript-eslint/scope-manager": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz",
+      "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==",
+      "dev": true,
+      "dependencies": {
+        "@typescript-eslint/types": "6.21.0",
+        "@typescript-eslint/visitor-keys": "6.21.0"
+      },
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      }
+    },
+    "node_modules/@typescript-eslint/type-utils": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz",
+      "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==",
+      "dev": true,
+      "dependencies": {
+        "@typescript-eslint/typescript-estree": "6.21.0",
+        "@typescript-eslint/utils": "6.21.0",
+        "debug": "^4.3.4",
+        "ts-api-utils": "^1.0.1"
+      },
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^7.0.0 || ^8.0.0"
+      },
+      "peerDependenciesMeta": {
+        "typescript": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/@typescript-eslint/types": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz",
+      "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==",
+      "dev": true,
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      }
+    },
+    "node_modules/@typescript-eslint/typescript-estree": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz",
+      "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==",
+      "dev": true,
+      "dependencies": {
+        "@typescript-eslint/types": "6.21.0",
+        "@typescript-eslint/visitor-keys": "6.21.0",
+        "debug": "^4.3.4",
+        "globby": "^11.1.0",
+        "is-glob": "^4.0.3",
+        "minimatch": "9.0.3",
+        "semver": "^7.5.4",
+        "ts-api-utils": "^1.0.1"
+      },
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependenciesMeta": {
+        "typescript": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/@typescript-eslint/utils": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz",
+      "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==",
+      "dev": true,
+      "dependencies": {
+        "@eslint-community/eslint-utils": "^4.4.0",
+        "@types/json-schema": "^7.0.12",
+        "@types/semver": "^7.5.0",
+        "@typescript-eslint/scope-manager": "6.21.0",
+        "@typescript-eslint/types": "6.21.0",
+        "@typescript-eslint/typescript-estree": "6.21.0",
+        "semver": "^7.5.4"
+      },
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^7.0.0 || ^8.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/visitor-keys": {
+      "version": "6.21.0",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz",
+      "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==",
+      "dev": true,
+      "dependencies": {
+        "@typescript-eslint/types": "6.21.0",
+        "eslint-visitor-keys": "^3.4.1"
+      },
+      "engines": {
+        "node": "^16.0.0 || >=18.0.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      }
+    },
+    "node_modules/@ungap/structured-clone": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz",
+      "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==",
+      "dev": true
+    },
+    "node_modules/abbrev": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+      "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q=="
+    },
+    "node_modules/acorn": {
+      "version": "8.11.3",
+      "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
+      "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==",
+      "dev": true,
+      "bin": {
+        "acorn": "bin/acorn"
+      },
+      "engines": {
+        "node": ">=0.4.0"
+      }
+    },
+    "node_modules/acorn-jsx": {
+      "version": "5.3.2",
+      "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+      "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+      "dev": true,
+      "peerDependencies": {
+        "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+      }
+    },
+    "node_modules/agent-base": {
+      "version": "6.0.2",
+      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
+      "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+      "dependencies": {
+        "debug": "4"
+      },
+      "engines": {
+        "node": ">= 6.0.0"
+      }
+    },
+    "node_modules/ajv": {
+      "version": "6.12.6",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+      "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+      "dev": true,
+      "dependencies": {
+        "fast-deep-equal": "^3.1.1",
+        "fast-json-stable-stringify": "^2.0.0",
+        "json-schema-traverse": "^0.4.1",
+        "uri-js": "^4.2.2"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/epoberezkin"
+      }
+    },
+    "node_modules/ansi-regex": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+      "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/ansi-styles": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+      "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+      "dependencies": {
+        "color-convert": "^2.0.1"
+      },
+      "engines": {
+        "node": ">=8"
+      },
+      "funding": {
+        "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+      }
+    },
+    "node_modules/aproba": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz",
+      "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ=="
+    },
+    "node_modules/are-we-there-yet": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz",
+      "integrity": "sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==",
+      "deprecated": "This package is no longer supported.",
+      "dependencies": {
+        "delegates": "^1.0.0",
+        "readable-stream": "^3.6.0"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/argparse": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+      "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+      "dev": true
+    },
+    "node_modules/array-union": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+      "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
+      "dev": true,
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/asn1.js": {
+      "version": "5.4.1",
+      "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz",
+      "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==",
+      "dependencies": {
+        "bn.js": "^4.0.0",
+        "inherits": "^2.0.1",
+        "minimalistic-assert": "^1.0.0",
+        "safer-buffer": "^2.1.0"
+      }
+    },
+    "node_modules/asn1.js/node_modules/bn.js": {
+      "version": "4.12.0",
+      "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+      "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+    },
+    "node_modules/balanced-match": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+      "integrity":
"sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/bn.js": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", + "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==" + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==" + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "bin": { + "color-support": "bin.js" + } + }, + 
"node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==" + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==" + }, + "node_modules/detect-libc": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dotenv": { + "version": "16.4.5", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz", + "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/elliptic": { + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.5.tgz", + "integrity": "sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==", + "dependencies": { + "bn.js": "^4.11.9", + "brorand": "^1.1.0", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.1", + "inherits": "^2.0.4", + "minimalistic-assert": "^1.0.1", + 
"minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/elliptic/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", + "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + 
"dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "dev": true + }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fs-minipass/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": 
"sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/gauge": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/gauge/-/gauge-3.0.2.tgz", + "integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==", + "deprecated": "This package is no longer supported.", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.2", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.1", + "object-assign": "^4.1.1", + "signal-exit": "^3.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": 
"sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/google-protobuf": { + "version": "3.21.2", + "resolved": "https://registry.npmjs.org/google-protobuf/-/google-protobuf-3.21.2.tgz", + "integrity": "sha512-3MSOYFO5U9mPGikIYCzK0SaThypfGgS6bHqrUGXG3DPHCrb+txNqeEcns1W0lkGfk0rCyNXm7xB9rMxnCiZOoA==" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/grpc-tools": { + "version": "1.12.4", + "resolved": "https://registry.npmjs.org/grpc-tools/-/grpc-tools-1.12.4.tgz", + "integrity": "sha512-5+mLAJJma3BjnW/KQp6JBjUMgvu7Mu3dBvBPd1dcbNIb+qiR0817zDpgPjS7gRb+l/8EVNIa3cB02xI9JLToKg==", + "hasInstallScript": true, + "dependencies": { + "@mapbox/node-pre-gyp": "^1.0.5" + }, + "bin": { + "grpc_tools_node_protoc": "bin/protoc.js", + "grpc_tools_node_protoc_plugin": "bin/protoc_plugin.js" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==" + }, + "node_modules/hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "dependencies": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "node_modules/hmac-drbg": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==", + "dependencies": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + 
"dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-schema-traverse": { + "version": 
"0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/long": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", + "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": 
"https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==" + }, + "node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minizlib/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/nan": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.19.0.tgz", + "integrity": "sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw==", + "optional": true + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": 
"sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/nopt": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", + "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npmlog": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-5.0.1.tgz", + "integrity": "sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==", + "deprecated": "This package is no longer supported.", + "dependencies": { + "are-we-there-yet": "^2.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^3.0.0", + "set-blocking": "^2.0.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": 
"sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkcs11js": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/pkcs11js/-/pkcs11js-1.3.1.tgz", + "integrity": "sha512-eo7fTeQwYGzX1pFmRaf4ji/WcDW2XKpwqylOwzutsjNWECv6G9PzDHj3Yj5dX9EW/fydMnJG8xvWj/btnQT9TA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.15.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/PeculiarVentures" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/protobufjs": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.3.0.tgz", + "integrity": "sha512-YWD03n3shzV9ImZRX3ccbjqLxj7NokGN0V/ESiBV5xWqrommYHYiihuIyavq03pWSGqlyvYUFmfoMKd+1rPA/g==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, 
+ { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/semver": { + "version": "7.6.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.1.tgz", + "integrity": "sha512-f/vbBsu+fOiYt+lmwZV0rVwJScl46HppnOA1ZvIuBWKOTlllpyJ3bfVax76/OrhCH38dyxoDIA8K7uB963IYgA==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-blocking": { + "version": 
"2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + 
"minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/ts-api-utils": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", + "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==", + "dev": true, + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + 
"https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wide-align": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "dependencies": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": 
"sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/src/dlt/gateway/dltApp/package.json b/src/dlt/gateway/dltApp/package.json new file mode 100644 index 0000000000000000000000000000000000000000..9d29b5287e1d3085c1cede9ab73b23189f3228d3 --- /dev/null +++ b/src/dlt/gateway/dltApp/package.json @@ -0,0 +1,38 @@ +{ + "name": "dlt_gateway", + "version": "1.0.0", + "description": "A DLT application that record and manages network related data as JSON, implemented in typeScript using HLF fabric-gateway", + "main": "dist/index.js", + "typings": "dist/index.d.ts", + "engines": { + "node": ">=18" + }, + "scripts": { + "build": "tsc", + "build:watch": "tsc -w", + "lint": "eslint . --ext .ts", + "prepare": "npm run build", + "pretest": "npm run lint", + "start": "node dist/dlt_gateway.js" + }, + "engineStrict": true, + "author": "Javier Jose Diaz (CTTC)", + "license": "Apache-2.0", + "dependencies": { + "@grpc/grpc-js": "^1.10.8", + "@grpc/proto-loader": "^0.7.13", + "@hyperledger/fabric-gateway": "~1.4.0", + "dotenv": "^16.4.5", + "grpc-tools": "^1.12.4", + "protobufjs": "^7.3.0", + "uuid": "^9.0.1" + }, + "devDependencies": { + "@tsconfig/node18": "^18.2.2", + "@types/node": "^18.18.6", + "@typescript-eslint/eslint-plugin": "^6.9.0", + "@typescript-eslint/parser": "^6.9.0", + "eslint": "^8.52.0", + "typescript": "~5.2.2" + } +} diff --git a/src/dlt/gateway/dltApp/src/dltGateway.js b/src/dlt/gateway/dltApp/src/dltGateway.js new file mode 100644 index 0000000000000000000000000000000000000000..656397dc88bfd8289b53e72a8d1dbde9eab32594 --- /dev/null +++ b/src/dlt/gateway/dltApp/src/dltGateway.js @@ -0,0 +1,262 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+const grpc = require('@grpc/grpc-js');
+const protoLoader = require('@grpc/proto-loader');
+const path = require('path');
+const { connectToNetwork } = require('../dist/fabricConnect');
+const utf8Decoder = new TextDecoder();
+
+// Load the protocol buffer definitions
+const PROTO_PATH = path.resolve(__dirname, '../proto/dlt_gateway.proto');
+const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
+    keepCase: true,
+    longs: String,
+    enums: String,
+    defaults: true,
+    oneofs: true,
+});
+const dltProto = grpc.loadPackageDefinition(packageDefinition).dlt;
+
+// Create a gRPC server instance
+const server = new grpc.Server();
+let contractInstance = null;
+let closeConnection = null;
+let events = null; // Chaincode event iterable, consumed in subscribeToDlt
+
+const clients = new Set(); // Set to keep track of active client streams
+
+// Initialize the connection to the chaincode
+async function initChaincodeConnection() {
+    try {
+        const networkResources = await connectToNetwork();
+        contractInstance = networkResources.contract;
+        events = networkResources.events;
+        closeConnection = networkResources.close;
+
+        console.log("Chaincode connection established successfully.");
+    } catch (error) {
+        console.error('Failed to establish chaincode connection:', error);
+        process.exit(1); // Exit if the connection cannot be established
+    }
+}
+
+// gRPC method to handle recording data to the DLT
+async function recordToDlt(call, callback) {
+    if (!contractInstance) {
+        callback({
+            code: grpc.status.UNAVAILABLE,
+            details: "Chaincode connection is not established."
+        });
+        return;
+    }
+    const { record_id, operation, data_json } = call.request;
+    try {
+        console.log(`Operation requested: ${operation}`);
+
+        switch (operation) {
+            case 'DLTRECORDOPERATION_ADD':
+                await contractInstance.submitTransaction('StoreRecord', JSON.stringify(record_id), data_json);
+                break;
+            case 'DLTRECORDOPERATION_UPDATE':
+                await contractInstance.submitTransaction('UpdateRecord', JSON.stringify(record_id), data_json);
+                break;
+            case 'DLTRECORDOPERATION_DELETE':
+                await contractInstance.submitTransaction('DeleteRecord', JSON.stringify(record_id));
+                break;
+            default:
+                throw new Error('Invalid operation');
+        }
+        // Send success response
+        callback(null, { record_id, status: 'DLTRECORDSTATUS_SUCCEEDED' });
+    } catch (error) {
+        // Send failure response with the error message
+        console.error('Transaction failed:', error);
+        callback(null, { record_id, status: 'DLTRECORDSTATUS_FAILED', error_message: error.message });
+    }
+}
+
+// gRPC method to fetch data from the DLT
+async function getFromDlt(call, callback) {
+    if (!contractInstance) {
+        callback({
+            code: grpc.status.UNAVAILABLE,
+            details: "Chaincode connection is not established."
+        });
+        return;
+    }
+
+    try {
+        console.log("Received GetFromDlt request:", call.request);
+        const resultBytes = await contractInstance.evaluateTransaction('RetrieveRecord', JSON.stringify(call.request));
+        // Decode and parse the result
+        const resultJson = utf8Decoder.decode(resultBytes);
+        const result = JSON.parse(resultJson);
+
+        // Send the response with the formatted JSON data
+        callback(null, { record_id: call.request, operation: result.operation, data_json: result.data_json });
+    } catch (error) {
+        if (error.message.includes("data not found for key")) {
+            // Return an empty response when no record is found
+            console.log("Record not found:", error.message);
+            const emptyRecordId = {
+                domain_uuid: { uuid: "" },
+                type: 'DLTRECORDTYPE_UNDEFINED',
+                record_uuid: { uuid: "" }
+            };
+            callback(null, { record_id: emptyRecordId, data_json: "" });
+        } else {
+            // Send failure response with the error message
+            callback({
+                code: grpc.status.UNKNOWN,
+                details: error.message
+            });
+        }
+    }
+}
+
+// Subscription to DLT events
+const eventNameToEventTypeEnum = {
+    'StoreRecord': 'EVENTTYPE_CREATE',
+    'UpdateRecord': 'EVENTTYPE_UPDATE',
+    'DeleteRecord': 'EVENTTYPE_REMOVE'
+};
+
+function subscribeToDlt(call) {
+    if (!events) {
+        call.emit('error', {
+            code: grpc.status.UNAVAILABLE,
+            details: "Event listener is not established."
+        });
+        return;
+    }
+
+    // Add the client to the set of active clients
+    clients.add(call);
+    console.log(`Client connected. Total clients: ${clients.size}`);
+
+    // Clean up when the client disconnects
+    call.on('cancelled', () => {
+        clients.delete(call);
+        console.log(`Client disconnected (cancelled). Total clients: ${clients.size}`);
+    });
+    call.on('error', (err) => {
+        clients.delete(call);
+        console.log(`Client disconnected (error: ${err.message}). Total clients: ${clients.size}`);
+    });
+    call.on('end', () => {
+        clients.delete(call);
+        console.log(`Client disconnected (end). Total clients: ${clients.size}`);
+    });
+
+    // Fan out chaincode events to all subscribed clients, honoring stream backpressure
+    (async () => {
+        try {
+            for await (const event of events) {
+                const eventPayload = event.payload;
+                const resultJson = utf8Decoder.decode(eventPayload);
+                const eventJson = JSON.parse(resultJson);
+
+                console.log("Writing event to stream:", eventJson.record_id);
+
+                const eventType = eventNameToEventTypeEnum[event.eventName] || 'EVENTTYPE_UNDEFINED';
+
+                for (const client of clients) {
+                    const writeSuccessful = client.write({
+                        event: {
+                            timestamp: { timestamp: Math.floor(Date.now() / 1000) },
+                            event_type: eventType
+                        },
+                        record_id: {
+                            domain_uuid: { uuid: eventJson.record_id.domain_uuid.uuid },
+                            type: eventJson.record_id.type || 'DLTRECORDTYPE_UNDEFINED',
+                            record_uuid: { uuid: eventJson.record_id.record_uuid.uuid }
+                        }
+                    });
+
+                    // If the internal buffer is full, wait for the 'drain' event before continuing
+                    if (!writeSuccessful) {
+                        await new Promise((resolve) => client.once('drain', resolve));
+                    }
+                }
+            }
+        } catch (error) {
+            for (const client of clients) {
+                client.emit('error', {
+                    code: grpc.status.UNKNOWN,
+                    details: `Error processing event: ${error.message}`
+                });
+            }
+        }
+    })();
+}
+
+// Placeholder: status reporting is not implemented yet; fail fast so callers do not hang
+function getDltStatus(call, callback) {
+    callback({
+        code: grpc.status.UNIMPLEMENTED,
+        details: "GetDltStatus is not implemented."
+    });
+}
+
+// Placeholder: peer listing is not implemented yet; fail fast so callers do not hang
+function getDltPeers(call, callback) {
+    callback({
+        code: grpc.status.UNIMPLEMENTED,
+        details: "GetDltPeers is not implemented."
+    });
+}
+
+// Add the service to the server
+server.addService(dltProto.DltGatewayService.service, {
+    RecordToDlt: recordToDlt,
+    GetFromDlt: getFromDlt,
+    SubscribeToDlt: subscribeToDlt,
+    GetDltStatus: getDltStatus,
+    GetDltPeers: getDltPeers,
+});
+
+// Start the server
+const PORT = process.env.GRPC_PORT || '50051';
+server.bindAsync(`0.0.0.0:${PORT}`, grpc.ServerCredentials.createInsecure(), async (error) => {
+    if (error) {
+        console.error('Failed to bind server:', error);
+        return;
+    }
+    console.log(`gRPC server running at http://0.0.0.0:${PORT}`);
+    await initChaincodeConnection(); // Connect to the chaincode; the server starts serving once bound
+});
+
+// Handle shutdown gracefully
+process.on('SIGINT', async () => {
+    console.log('Shutting down...');
+    await closeConnection();
+    server.forceShutdown();
+});
\ No newline at end of file
diff --git a/src/dlt/gateway/dltApp/src/fabricConnect.ts b/src/dlt/gateway/dltApp/src/fabricConnect.ts
new file mode 100644
index 0000000000000000000000000000000000000000..973fd6077cd3e4ee7d397f8625f4a399935183f9
--- /dev/null
+++ b/src/dlt/gateway/dltApp/src/fabricConnect.ts
@@ -0,0 +1,209 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
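+
+// fabricConnect builds the Fabric Gateway connection consumed by dltGateway.js.
+// All connection parameters are read from dltApp/.env; a sketch of the expected
+// variables (the values below are placeholders, not working credentials):
+//
+//     CHANNEL_NAME=channel1
+//     CHAINCODE_NAME=dltRecords
+//     MSP_ID=Org1MSP
+//     KEY_DIRECTORY_PATH=/path/to/keys/priv_sk
+//     CERT_DIRECTORY_PATH=/path/to/keys/cert.pem
+//     TLS_CERT_PATH=/path/to/keys/ca.crt
+//     PEER_ENDPOINT=localhost:7051
+//     PEER_HOST_ALIAS=peer0.org1.example.com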
+
+import * as grpc from '@grpc/grpc-js';
+import { connect, Contract, Identity, Signer, signers, Network, CloseableAsyncIterable, ChaincodeEvent, GatewayError } from '@hyperledger/fabric-gateway';
+import * as crypto from 'crypto';
+import { promises as fs } from 'fs';
+import * as path from 'path';
+import { TextDecoder } from 'util';
+import * as dotenv from 'dotenv';
+
+dotenv.config({ path: path.resolve(__dirname, '..', '.env') });
+const channelName = getEnvVar('CHANNEL_NAME');
+const chaincodeName = getEnvVar('CHAINCODE_NAME');
+const mspId = getEnvVar('MSP_ID');
+
+// Path to the user's private key file.
+const keyDirectoryPath = getEnvVar('KEY_DIRECTORY_PATH');
+
+// Path to the user's certificate file.
+const certDirectoryPath = getEnvVar('CERT_DIRECTORY_PATH');
+
+// Path to the peer TLS certificate.
+const tlsCertPath = getEnvVar('TLS_CERT_PATH');
+
+// Gateway peer endpoint.
+const peerEndpoint = getEnvVar('PEER_ENDPOINT');
+
+// Gateway peer SSL host name override.
+const peerHostAlias = getEnvVar('PEER_HOST_ALIAS');
+
+const utf8Decoder = new TextDecoder();
+
+export async function connectToNetwork(): Promise<{ contract: Contract, events: CloseableAsyncIterable<ChaincodeEvent>, close: () => Promise<void> }> {
+
+    await displayInputParameters();
+
+    // The gRPC client connection should be shared by all Gateway connections to this endpoint.
+    const client = await newGrpcConnection();
+
+    const gateway = connect({
+        client,
+        identity: await newIdentity(),
+        signer: await newSigner(),
+        // Default timeouts for different gRPC calls
+        evaluateOptions: () => {
+            return { deadline: Date.now() + 5000 }; // 5 seconds
+        },
+        endorseOptions: () => {
+            return { deadline: Date.now() + 15000 }; // 15 seconds
+        },
+        submitOptions: () => {
+            return { deadline: Date.now() + 5000 }; // 5 seconds
+        },
+        commitStatusOptions: () => {
+            return { deadline: Date.now() + 60000 }; // 1 minute
+        },
+    });
+
+    // Get a network instance representing the channel where the smart contract is deployed.
+    const network = gateway.getNetwork(channelName);
+
+    // Get the smart contract from the network.
+    const contract = network.getContract(chaincodeName);
+
+    // Listen for events emitted by transactions
+    const events: CloseableAsyncIterable<ChaincodeEvent> = await network.getChaincodeEvents(chaincodeName);
+
+    // Initialize the ledger.
+    await initLedger(contract);
+
+    return {
+        contract: contract,
+        events: events,
+        close: async function () {
+            events.close();
+            gateway.close();
+            client.close();
+        }
+    };
+}
+
+async function newGrpcConnection(): Promise<grpc.Client> {
+    const tlsRootCert = await fs.readFile(tlsCertPath);
+    const tlsCredentials = grpc.credentials.createSsl(tlsRootCert);
+    return new grpc.Client(peerEndpoint, tlsCredentials, {
+        'grpc.ssl_target_name_override': peerHostAlias,
+    });
+}
+
+async function newIdentity(): Promise<Identity> {
+    const credentials = await fs.readFile(certDirectoryPath);
+    return { mspId, credentials };
+}
+
+async function newSigner(): Promise<Signer> {
+    const privateKeyPem = await fs.readFile(keyDirectoryPath);
+    const privateKey = crypto.createPrivateKey(privateKeyPem);
+    return signers.newPrivateKeySigner(privateKey);
+}
+
+/**
+ * This type of transaction would typically only be run once by an application the first time it was started after its
+ * initial deployment. A new version of the chaincode deployed later would likely not need to run an "init" function.
+ */
+async function initLedger(contract: Contract): Promise<void> {
+    try {
+        console.log('\n--> Submit Transaction: InitLedger, function activates the chaincode');
+
+        await contract.submitTransaction('InitLedger');
+
+        console.log('*** Transaction committed successfully');
+    } catch (error) {
+        console.error('Failed to submit InitLedger transaction:', error);
+        throw error;
+    }
+}
+
+/**
+ * getEnvVar() returns the value of an environment variable, failing if it is not set.
+ */
+function getEnvVar(varName: string): string {
+    const value = process.env[varName];
+    if (!value) {
+        throw new Error(`Environment variable ${varName} is not set`);
+    }
+    return value;
+}
+
+/**
+ * displayInputParameters() prints the global-scope parameters used by the main driver routine.
+ */
+async function displayInputParameters(): Promise<void> {
+    console.log(`channelName: ${channelName}`);
+    console.log(`chaincodeName: ${chaincodeName}`);
+    console.log(`mspId: ${mspId}`);
+    console.log(`keyDirectoryPath: ${keyDirectoryPath}`);
+    console.log(`certDirectoryPath: ${certDirectoryPath}`);
+    console.log(`tlsCertPath: ${tlsCertPath}`);
+    console.log(`peerEndpoint: ${peerEndpoint}`);
+    console.log(`peerHostAlias: ${peerHostAlias}`);
+}
+
+/**
+ * startEventListening() initiates the event listener for chaincode events.
+ */
+async function startEventListening(network: Network): Promise<CloseableAsyncIterable<ChaincodeEvent>> {
+    console.log('\n*** Start chaincode event listening');
+
+    const events = await network.getChaincodeEvents(chaincodeName);
+
+    void readEvents(events); // Don't await - run asynchronously
+    return events;
+}
+
+/**
+ * readEvents() formats and displays received chaincode events as JSON.
+ */
+async function readEvents(events: CloseableAsyncIterable<ChaincodeEvent>): Promise<void> {
+    try {
+        for await (const event of events) {
+            const payload = parseJson(event.payload);
+            console.log(`\n<-- Chaincode event received: ${event.eventName} -`, payload);
+        }
+    } catch (error: unknown) {
+        // Ignore the read error when events.close() is called explicitly
+        if (!(error instanceof GatewayError) || error.code !== grpc.status.CANCELLED.valueOf()) {
+            throw error;
+        }
+    }
+}
+
+/**
+ * parseJson() decodes and parses JSON bytes.
+ */
+function parseJson(jsonBytes: Uint8Array): unknown {
+    const json = utf8Decoder.decode(jsonBytes);
+    return JSON.parse(json);
+}
+
diff --git a/src/dlt/gateway/dltApp/tsconfig.json b/src/dlt/gateway/dltApp/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..34eddef69221adeca4861399d574047a8c9ae6f1
--- /dev/null
+++ b/src/dlt/gateway/dltApp/tsconfig.json
@@ -0,0 +1,17 @@
+{
+  "extends": "@tsconfig/node18/tsconfig.json",
+  "compilerOptions": {
+    "experimentalDecorators": true,
+    "emitDecoratorMetadata": true,
+    "outDir": "dist",
+    "declaration": true,
+    "sourceMap": true,
+    "noImplicitAny": true
+  },
+  "include": [
+    "./src/**/*"
+  ],
+  "exclude": [
+    "./src/**/*.spec.ts"
+  ]
+}
\ No newline at end of file
diff --git a/src/dlt/gateway/keys/.gitignore b/src/dlt/gateway/keys/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b54a37fc5e1fa51616f1de2ef20651cb28504040
--- /dev/null
+++ b/src/dlt/gateway/keys/.gitignore
@@ -0,0 +1,3 @@
+ca.crt
+cert.pem
+priv_sk
diff --git a/src/dlt/gateway/keys/place_hls_keys_in_this_folder b/src/dlt/gateway/keys/place_hls_keys_in_this_folder
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dlt/gateway/samples/sampleTopo.json b/src/dlt/gateway/samples/sampleTopo.json
new file mode 100644
index 0000000000000000000000000000000000000000..67115d4be322ed8567688c51a68a95c2b0800138
--- /dev/null
+++ b/src/dlt/gateway/samples/sampleTopo.json
@@ -0,0 +1,30 @@
+{
+  "name": "Network A",
+  "nodes": [
+    {
+      "id": "node1",
+      "type": "switch",
+      "status": "active",
+      "connections": [
+        "node2",
+        "node3"
+      ]
+    },
+    {
+      "id": "node2",
+      "type": "router",
+      "status": "inactive",
+      "connections": [
+        "node1"
+      ]
+    },
+    {
+      "id": "node3",
+      "type": "server",
+      "status": "active",
+      "connections": [
+        "node1"
+      ]
+    }
+  ]
+}
diff --git a/src/dlt/gateway/samples/topo1.json b/src/dlt/gateway/samples/topo1.json
new file mode 100644
index 0000000000000000000000000000000000000000..e4a49981f99339d77538af814365703e463d3db5
--- /dev/null
+++ b/src/dlt/gateway/samples/topo1.json
@@ -0,0 +1,96 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "eth1", "type": "copper"}, {"uuid": "eth2", "type": "copper"}, {"uuid": "int", "type": "copper"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type":
"emu-datacenter", "device_drivers": [0], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "eth1", "type": "copper"}, {"uuid": "eth2", "type": "copper"}, {"uuid": "int", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "SRL1"}}, "device_type": "packet-router", "device_drivers": [8], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.100.100.101"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "57400"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "NokiaSrl1!", "use_tls": true + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "SRL2"}}, "device_type": "packet-router", "device_drivers": [8], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.100.100.102"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "57400"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "NokiaSrl1!", "use_tls": true + }}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "DC1/eth1==SRL1/ethernet-1/2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "SRL1/ethernet-1/2==DC1/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}}, + {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "SRL1/ethernet-1/1==SRL2/ethernet-1/1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "SRL2/ethernet-1/1==SRL1/ethernet-1/1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL1"}}, "endpoint_uuid": {"uuid": "ethernet-1/1"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "DC2/eth1==SRL2/ethernet-1/2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "SRL2/ethernet-1/2==DC2/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "SRL2"}}, "endpoint_uuid": {"uuid": "ethernet-1/2"}}, + {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + } + ] +} diff --git a/src/dlt/gateway/samples/topo2.json b/src/dlt/gateway/samples/topo2.json new file mode 100644 index 0000000000000000000000000000000000000000..6885c7d9082bfcb17447252636ea4efb0f500283 --- /dev/null +++ b/src/dlt/gateway/samples/topo2.json @@ -0,0 +1,210 @@ +{ + 
"contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": 
"copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R6"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/6"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + 
{"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R7"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/1"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/2"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/3"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/4"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/5"}, + {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "2/6"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "R1==R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1==R6"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R4==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}} 
+ ]}, + {"link_id": {"link_uuid": {"uuid": "R4==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R6"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R6==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R6==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]} + ] +} diff --git a/src/dlt/gateway/samples/topo3.json b/src/dlt/gateway/samples/topo3.json new file mode 100644 index 0000000000000000000000000000000000000000..f36fbd7d03a93db61a7233e084f60e7680a54606 --- /dev/null +++ b/src/dlt/gateway/samples/topo3.json @@ -0,0 +1,200 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "name": "admin"} + ], + "topologies": [ + {"topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}, "name": "admin"} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "be1.be", "type": "copper"}, {"uuid": "pt1.pt", "type": "copper"}, {"uuid": "uk1.uk", "type": "copper"}, + {"uuid": "es1.es", "type": "copper"}, {"uuid": "it1.it", "type": "copper"} + ]}}} + ]}}, + {"device_id": 
{"device_uuid": {"uuid": "be1.be"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "de1.de", "type": "copper"}, {"uuid": "gr1.gr", "type": "copper"}, {"uuid": "uk1.uk", "type": "copper"}, + {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "it1.it", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "de1.de", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}, + {"uuid": "pt1.pt", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "uk1.uk", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}, {"uuid": "gr1.gr", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "uk1.uk", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "es1.es", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "it1.it", "type": 
"copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "pt1.pt", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "es1.es", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}, + {"uuid": "gr1.gr", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"}, + {"uuid": "it1.it", "type": "copper"}, {"uuid": "de1.de", "type": "copper"}, {"uuid": "be1.be", "type": "copper"} + ]}}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "fr1.fr_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 4.804849}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_fr1.fr"}}, "attributes": {"total_capacity_gbps": 300, "used_capacity_gbps": 55.182499}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_de1.de"}}, "attributes": {"total_capacity_gbps": 600, "used_capacity_gbps": 199.272255}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "de1.de"}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 14.334868}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "pt1.pt_uk1.uk"}}, "attributes": {"total_capacity_gbps": 400, "used_capacity_gbps": 51.415678}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "pt1.pt"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "pt1.pt_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 3.733925}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": 
{"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "pt1.pt"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "pt1.pt_es1.es"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 13.32428}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "es1.es"}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "pt1.pt"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_gr1.gr"}}, "attributes": {"total_capacity_gbps": 800, "used_capacity_gbps": 1.593313}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_fr1.fr"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 98.574706}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_es1.es"}}, "attributes": {"total_capacity_gbps": 300, "used_capacity_gbps": 18.97108}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "es1.es"}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "it1.it_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 10.327772}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "it1.it"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "gr1.gr_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 7.983659}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "gr1.gr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "gr1.gr_de1.de"}}, "attributes": {"total_capacity_gbps": 5000, "used_capacity_gbps": 4930.897339}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "de1.de"}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "gr1.gr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "gr1.gr_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.895539}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "gr1.gr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "fr1.fr_uk1.uk"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 28.144199}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "fr1.fr_pt1.pt"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 1.916587}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": 
"fr1.fr_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 3.330747}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "fr1.fr_es1.es"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 96.682749}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "es1.es"}}, + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "fr1.fr"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "es1.es_pt1.pt"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 5.643483}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "es1.es"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "es1.es_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 15.353667}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "es1.es"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "es1.es_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 20.517778}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "es1.es"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "de1.de_uk1.uk"}}, "attributes": {"total_capacity_gbps": 600, "used_capacity_gbps": 239.446965}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "de1.de"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "de1.de_gr1.gr"}}, "attributes": {"total_capacity_gbps": 2100, "used_capacity_gbps": 110.602237}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "de1.de"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "de1.de_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 57.709307}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "be1.be"}}, + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "de1.de"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "uk1.uk_pt1.pt"}}, "attributes": {"total_capacity_gbps": 800, "used_capacity_gbps": 652.70225}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}, + {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "uk1.uk"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_uk1.uk"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 8.252107}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}, + {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.357069}, "link_endpoint_ids": [ + 
{"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "it1.it"}}, + {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_de1.de"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 20.400142}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "de1.de"}}, + {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 31.346514}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}, + {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "be1.be_gr1.gr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.026822}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}, + {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "be1.be"}} + ]} + ] +} diff --git a/src/dlt/gateway/samples/topo4.json b/src/dlt/gateway/samples/topo4.json new file mode 100644 index 0000000000000000000000000000000000000000..85bbad55eb8608d2d2a7abad7fffab8fffdae682 --- /dev/null +++ b/src/dlt/gateway/samples/topo4.json @@ -0,0 +1,150 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "T1"}}, "device_type": "optical-transponder", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "T1"}}, "endpoint_uuid": {"uuid": "1"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.101"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false, + "look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [{"uuid": "1", "type": "optical", "sample_types": [101, 102, 201, 202]}] + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "T2"}}, "device_type": "optical-transponder", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "T2"}}, "endpoint_uuid": {"uuid": "6"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.102"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false, + 
"look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [{"uuid": "6", "type": "optical", "sample_types": [101, 102, 201, 202]}] + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "optical-roadm", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "3"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "12"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "13"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.201"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false, + "look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "2"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "3"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "12"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "13"} + ]} + }}] + } + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "optical-roadm", "device_drivers": [11], + "device_operational_status": 1, + "device_endpoints": [ + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "4"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "5"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "14"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }}, + {"endpoint_id": { + "device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "15"}, + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + }} + ], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.254.253.202"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "2022"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "force_running": false, 
"hostkey_verify": false, + "look_for_keys": false, "allow_agent": false, "commit_per_rule": false, + "device_params": {"name": "default"}, "manager_params": {"timeout": 120}, + "endpoints": [ + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "4"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "5"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "14"}, + {"sample_types": [101, 102, 201, 202], "type": "optical", "uuid": "15"} + ] + }}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "T1->R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "12"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1->T1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "T1"}}, "endpoint_uuid": {"uuid": "1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1->R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "14"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2->R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "13"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T2->R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "15"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2->T2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "T2"}}, "endpoint_uuid": {"uuid": "6"}} + ]} + ] +} diff --git a/src/dlt/gateway/samples/updatedTopo.json b/src/dlt/gateway/samples/updatedTopo.json new file mode 100644 index 0000000000000000000000000000000000000000..74b084b9d9c48de89ddf6122f46316a74c1a4fca --- /dev/null +++ b/src/dlt/gateway/samples/updatedTopo.json @@ -0,0 +1,17 @@ +{ + "name": "Updated Network", + "nodes": [ + { + "id": "node1", + "type": "switch", + "status": "active", + "connections": ["node2"] + }, + { + "id": "node2", + "type": "router", + "status": "inactive", + "connections": ["node1"] + } + ] +} diff --git a/src/dlt/gateway/tests/perfTest.js b/src/dlt/gateway/tests/perfTest.js new file mode 100644 index 0000000000000000000000000000000000000000..fd60bd9c4bba04a7c2a048d26389dd66381b5749 --- /dev/null +++ b/src/dlt/gateway/tests/perfTest.js @@ -0,0 +1,119 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const { connectToNetwork } = require('../dltApp/dist/fabricConnect'); +const fsp = require('fs').promises; +const fs = require('fs'); +const util = require('util'); + +const utf8Decoder = new TextDecoder(); +const topoDirectory = '../samples/'; +const topologies = ['topo1.json', 'topo2.json', 'topo3.json', 'topo4.json']; +//const topologies = ['topo4.json']; + +const iterations = 1000; + +async function main() { + try { + const { contract, close } = await connectToNetwork(); + for (const topoFile of topologies) { + const logFilePath = `./operation_times_${topoFile.split('.')[0]}.txt`; // Creates a separate logfile for each topology + const appendFile = util.promisify(fs.appendFile.bind(fs, logFilePath)); + + console.log(`Starting tests for ${topoFile}`); + for (let i = 0; i < iterations; i++) { + console.log(`Iteration ${i + 1} for ${topoFile}`); + await runBlockchainOperations(contract, topoFile, appendFile); + } + } + await close(); // Clean up the connection + } catch (error) { + console.error('An error occurred:', error); + } +} + +async function runBlockchainOperations(contract, topoFile, appendFile) { + const assetId = `asset${Date.now()}`; + const jsonData = await readJsonData(`${topoDirectory}${topoFile}`); + + // Define operations + const operations = [ + { type: 'STORE', jsonData }, + { type: 'UPDATE', jsonData }, + { type: 'FETCH', jsonData: null }, + { type: 'DELETE', jsonData: null }, + { type: 'FETCH_ALL', jsonData: null } + ]; + + for (let op of operations) { + await executeOperation(contract, op.type, assetId, op.jsonData, appendFile); + } +} + +async function readJsonData(filePath) { + try { + return await fsp.readFile(filePath, 'utf8'); + } catch (error) { + console.error(`Failed to read file: ${filePath}`, error); + return '{}'; + } +} + +async function executeOperation(contract, operationType, assetId, jsonData, appendFile) { + const startTime = process.hrtime.bigint(); + try { + let result; + switch (operationType) { + case 'STORE': + result = await contract.submitTransaction('StoreTopoData', assetId, jsonData); + break; + case 'UPDATE': + result = await contract.submitTransaction('UpdateTopoData', assetId, jsonData); + break; + case 'FETCH': + result = await contract.evaluateTransaction('RetrieveTopoData', assetId); + break; + case 'DELETE': + result = await contract.submitTransaction('DeleteTopo', assetId); + break; + case 'FETCH_ALL': + result = await contract.evaluateTransaction('GetAllInfo'); + break; + } + result = utf8Decoder.decode(result); + const operationTime = recordOperationTime(startTime); + await logOperationTime(operationTime, operationType, appendFile); + console.log(`${operationType} Result:`, result); + } catch (error) { + console.error(`Error during ${operationType}:`, error); + } +} + +function recordOperationTime(startTime) { + const endTime = process.hrtime.bigint(); + const operationTime = Number(endTime - startTime) / 1e6; + return operationTime; +} + +async function logOperationTime(operationTime, operationType, appendFile) { + const timestamp = Date.now(); + const logEntry = `${timestamp} - ${operationType} - Execution time: ${operationTime.toFixed(3)} ms\n`; + try { + await appendFile(logEntry); + } catch (error) { + console.error('Error writing to log file:', error); + } +} + +main(); diff --git a/src/dlt/gateway/tests/rateTest.js b/src/dlt/gateway/tests/rateTest.js new file mode 100644 index 0000000000000000000000000000000000000000..f7c40884293220c0ab235322d74aee61074a281c --- /dev/null +++ b/src/dlt/gateway/tests/rateTest.js @@ -0,0
+1,85 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { connectToNetwork } = require('../dltApp/dist/fabricConnect'); +const fs = require('fs'); +const util = require('util'); +const appendFile = util.promisify(fs.appendFile); +const logFilePath = './transaction_times_TPS_TOPO3.txt'; +const utf8Decoder = new TextDecoder(); +const topoDirectory = '../samples/'; + +async function main() { + const { contract, close } = await connectToNetwork(); + try { + const rates = [10, 50, 250, 500]; // Transactions per second + for (let i = 0; i < 1000; i++) { + for (let rate of rates) { + console.log(`Testing at ${rate} TPS`); + await performLoadTest(contract, 1000, rate); + } + } + } finally { + await close(); // Ensure to close the network connection + } +} + +async function performLoadTest(contract, totalTransactions, rate) { + const interval = 1000 / rate; // Calculate interval in milliseconds + let promises = []; + const startTime = Date.now(); + + for (let i = 0; i < totalTransactions; i++) { + // Queue a transaction promise + promises.push(sendTransaction(contract, `asset${Date.now() + i}`)); + + // Process in batches according to the rate + if ((i + 1) % rate === 0 || i === totalTransactions - 1) { + await Promise.all(promises); // Send a batch of transactions + promises = []; // Reset for the next batch + if (i < totalTransactions - 1) { + await new Promise(resolve => setTimeout(resolve, interval)); // Throttle the transaction sending + } + } + } + + const endTime = Date.now(); + const totalTime = endTime - startTime; + const actualRate = totalTransactions / (totalTime / 1000); + console.log(`Total time for ${totalTransactions} transactions at target ${rate} TPS: ${totalTime} ms`); + console.log(`Actual rate achieved: ${actualRate.toFixed(2)} TPS`); + await appendFile(logFilePath, `Target Rate: ${rate} TPS, Total Time: ${totalTime} ms, Actual Rate: ${actualRate.toFixed(2)} TPS\n`); +} + +async function sendTransaction(contract, assetId) { + try { + const jsonData = await readJsonData(`${topoDirectory}topo3.json`); + const result = await contract.submitTransaction('StoreTopoData', assetId, jsonData); + return utf8Decoder.decode(result); + } catch (error) { + console.error('Transaction failed:', error); + return null; + } +} + +async function readJsonData(filePath) { + try { + return await fs.promises.readFile(filePath, 'utf8'); + } catch (error) { + console.error(`Failed to read file: ${filePath}`, error); + return '{}'; + } +} + +main().catch(console.error); diff --git a/src/dlt/gateway/tests/simpleTest.js b/src/dlt/gateway/tests/simpleTest.js new file mode 100644 index 0000000000000000000000000000000000000000..72da03dec16b91c7b6acc21e339f2844d74153be --- /dev/null +++ b/src/dlt/gateway/tests/simpleTest.js @@ -0,0 +1,74 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { connectToNetwork } = require('../dltApp/dist/fabricConnect'); +const fs = require('fs'); +const util = require('util'); +const appendFile = util.promisify(fs.appendFile); +const logFilePath = './transaction_times_TPS2.txt'; +const utf8Decoder = new TextDecoder(); +const topoDirectory = '../samples/'; + +async function main() { + const { contract, close } = await connectToNetwork(); + try { + console.log('Testing with 500 consecutive transactions'); + await performLoadTest(contract); + } finally { + await close(); // Ensure the network connection is closed + } +} + +async function performLoadTest(contract) { + const totalTransactions = 500; + const promises = []; + const startTime = Date.now(); + + for (let i = 0; i < totalTransactions; i++) { + // Queue a transaction promise + promises.push(sendTransaction(contract, `asset${startTime}_${i}`)); + } + + await Promise.all(promises); // Send all transactions + + const endTime = Date.now(); + const totalTime = endTime - startTime; + const actualRate = totalTransactions / (totalTime / 1000); + console.log(`Total time for ${totalTransactions} transactions: ${totalTime} ms`); + console.log(`Actual rate achieved: ${actualRate.toFixed(2)} TPS`); + await appendFile(logFilePath, `Total Transactions: ${totalTransactions}, Total Time: ${totalTime} ms, Actual Rate: ${actualRate.toFixed(2)} TPS\n`); +} + +async function sendTransaction(contract, assetId) { + try { + const jsonData = await readJsonData(`${topoDirectory}topo4.json`); + //const jsonData = JSON.stringify({ data: `Data for ${assetId}`}); + const result = await contract.submitTransaction('StoreTopoData', assetId, jsonData); + return utf8Decoder.decode(result); + } catch (error) { + console.error('Transaction failed:', error); + return null; + } +} + +async function readJsonData(filePath) { + try { + return await fs.promises.readFile(filePath, 'utf8'); + } catch (error) { + console.error(`Failed to read file: ${filePath}`, error); + return '{}'; + } +} + +main().catch(console.error); diff --git a/src/dlt/gateway/tests/testEvents.js b/src/dlt/gateway/tests/testEvents.js new file mode 100644 index 0000000000000000000000000000000000000000..6e18e803202232a9613264dd49e6beee14546d14 --- /dev/null +++ b/src/dlt/gateway/tests/testEvents.js @@ -0,0 +1,66 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const path = require('path'); + +const PROTO_PATH = path.resolve(__dirname, '../../../../proto/dlt_gateway.proto'); +const packageDefinition = protoLoader.loadSync(PROTO_PATH, { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, +}); +const dltProto = grpc.loadPackageDefinition(packageDefinition).dlt; + +const client = new dltProto.DltGatewayService( + '127.0.0.1:32001', // Replace with TFS server IP_ADDRESS + grpc.credentials.createInsecure() +); + +function subscribeToDlt() { + const request = { + // Define any necessary subscription filters here if applicable + }; + + const call = client.SubscribeToDlt(request); + + call.on('data', (event) => { + console.log('Received event:', event); + }); + + call.on('error', (error) => { + console.error('Error:', error.message); + }); + + call.on('end', () => { + console.log('Stream ended.'); + }); + + // Optionally, you can cancel the subscription after a certain time or condition + setTimeout(() => { + console.log('Cancelling subscription...'); + call.cancel(); + }, 600000); // Cancel after 10 minutes for demonstration purposes +} + +function runTests() { + console.log("Testing subscription to DLT events"); + subscribeToDlt(); +} + +runTests(); diff --git a/src/dlt/gateway/tests/testGateway.js b/src/dlt/gateway/tests/testGateway.js new file mode 100644 index 0000000000000000000000000000000000000000..b08f648daf6f257f44ec94cd13dabccce67cd803 --- /dev/null +++ b/src/dlt/gateway/tests/testGateway.js @@ -0,0 +1,126 @@ +// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
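testEvents.js above consumes the server-streaming `SubscribeToDlt` RPC through callbacks; the same subscription can be consumed from Python as a plain iterator. A minimal sketch, assuming stubs generated from `proto/dlt_gateway.proto` with grpcio-tools (the `DltGatewayServiceStub` name follows the standard generated naming convention; the empty `DltRecordSubscription` request message is an assumption here, not verified against the proto):

```python
import grpc

# Assumes Python stubs generated from proto/dlt_gateway.proto; the module and
# request-message names below follow grpcio-tools conventions and are assumptions.
from common.proto import dlt_gateway_pb2, dlt_gateway_pb2_grpc

def subscribe_to_dlt(address: str = '127.0.0.1:32001') -> None:
    channel = grpc.insecure_channel(address)  # TFS DLT gateway endpoint
    stub = dlt_gateway_pb2_grpc.DltGatewayServiceStub(channel)
    # A server-streaming RPC returns an iterator; each iteration blocks until
    # the next event arrives, mirroring the 'data' callback in testEvents.js.
    for event in stub.SubscribeToDlt(dlt_gateway_pb2.DltRecordSubscription()):
        print('Received event:', event)

if __name__ == '__main__':
    subscribe_to_dlt()
```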
+ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const path = require('path'); +const fs = require('fs').promises; +const { v4: uuidv4 } = require('uuid'); // Import the UUID library + + +const PROTO_PATH = path.resolve(__dirname, '../../../../proto/dlt_gateway.proto'); +const packageDefinition = protoLoader.loadSync(PROTO_PATH, { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, +}); +const dltProto = grpc.loadPackageDefinition(packageDefinition).dlt; + +const client = new dltProto.DltGatewayService( + '127.0.0.1:32001', // Replace with TFS server IP_ADDRESS + grpc.credentials.createInsecure() +); + +const assetId = `asset-${Date.now()}`; +const domainUuid = `domain-${uuidv4()}`; // Generate a pretty domain UUID + +async function getTopoData(filename) { + try { + const data = await fs.readFile(`../samples/${filename}`, 'utf8'); + return data; + } catch (error) { + console.error('Failed to read file:', filename, error); + return '{}'; // Empty JSON if error + } +} + +async function processTopoData(operation, assetId, jsonFilename) { + let jsonData = '{}'; + if (jsonFilename) { + jsonData = await getTopoData(jsonFilename); + } + + const request = { + record_id: { + domain_uuid: { uuid: domainUuid }, // Replace "domain-uuid" with actual domain UUID if needed + type: 'DLTRECORDTYPE_TOPOLOGY', // Use the appropriate type if needed + record_uuid: { uuid: assetId } + }, + operation, + data_json: jsonData + }; + + return new Promise((resolve, reject) => { + client.RecordToDlt(request, (error, response) => { + if (error) { + console.error('Error:', error.message); + reject(error); + } else { + console.log('Response:', response); + resolve(response); + } + }); + }); +} + +async function getDLTData(assetId) { + + const request = { + domain_uuid: { uuid: domainUuid }, // Replace "domain-uuid" with actual domain UUID if needed + type: 'DLTRECORDTYPE_TOPOLOGY', // Use the appropriate type if needed + record_uuid: { uuid: assetId } + }; + + return new Promise((resolve, reject) => { + client.GetFromDlt(request, (error, response) => { + if (error) { + console.error('Error:', error.message); + reject(error); + } else { + console.log('Response:', response); + resolve(response); + } + }); + }); +} + +async function runTests() { + console.log("Testing Store Operation"); + await processTopoData('DLTRECORDOPERATION_ADD', assetId, 'topo2.json'); + + console.log("Testing Update Operation"); + await processTopoData('DLTRECORDOPERATION_UPDATE', assetId, 'topo3.json'); + + console.log("Testing Fetch Operation"); + await getDLTData(assetId); + + + console.log("Testing Delete Operation"); + await processTopoData('DLTRECORDOPERATION_DELETE', assetId); + + console.log("Testing Fetch All Operation"); + // This part assumes you have a GetAllInfo method implemented in your chaincode and corresponding gRPC service. + // client.GetAllInfo({}, (error, response) => { + // if (error) { + // console.error('Error:', error.message); + // } else { + // console.log('All Data:', response); + // } + // }); +} + +runTests().catch(console.error); diff --git a/src/forecaster/requirements.in b/src/forecaster/requirements.in index 6caa5d616f7b7efc525eb5d79a607b4005d0c4ac..9a31513799fd6aa5d915fb6c83a516176f290ce9 100644 --- a/src/forecaster/requirements.in +++ b/src/forecaster/requirements.in @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-#numpy==1.23.* +numpy<2.0.0 pandas==1.5.* #prophet==1.1.* scikit-learn==1.1.* diff --git a/src/interdomain/Config.py b/src/interdomain/Config.py index 918f60d79d51ee8f9fe3875b630de2c373c01a5a..d9098447bb107a878acf297d66eaf1573d72691d 100644 --- a/src/interdomain/Config.py +++ b/src/interdomain/Config.py @@ -15,6 +15,7 @@ from common.Settings import get_setting SETTING_NAME_TOPOLOGY_ABSTRACTOR = 'TOPOLOGY_ABSTRACTOR' +SETTING_NAME_DLT_INTEGRATION = 'DLT_INTEGRATION' TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'} def is_topology_abstractor_enabled() -> bool: @@ -22,3 +23,9 @@ def is_topology_abstractor_enabled() -> bool: if is_enabled is None: return False str_is_enabled = str(is_enabled).upper() return str_is_enabled in TRUE_VALUES + +def is_dlt_enabled() -> bool: + is_enabled = get_setting(SETTING_NAME_DLT_INTEGRATION, default=None) + if is_enabled is None: return False + str_is_enabled = str(is_enabled).upper() + return str_is_enabled in TRUE_VALUES diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py index 54878be2ed70fa834692b85ce27337d61160948b..bce5e69203417e9026ddabc336a260216d0fd991 100644 --- a/src/interdomain/service/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/InterdomainServiceServicerImpl.py @@ -34,7 +34,7 @@ from common.tools.object_factory.Device import json_device_id from common.tools.object_factory.EndPoint import json_endpoint_id from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient -from dlt.connector.client.DltConnectorClient import DltConnectorClient +from dlt.connector.client.DltConnectorClientAsync import DltConnectorClientAsync from pathcomp.frontend.client.PathCompClient import PathCompClient from service.client.ServiceClient import ServiceClient from slice.client.SliceClient import SliceClient @@ -88,7 +88,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): ]) if len(env_vars) == 2: # DLT available - dlt_connector_client = DltConnectorClient() + dlt_connector_client = DltConnectorClientAsync() dlt_connector_client.connect() else: dlt_connector_client = None diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py index c0497bd2902080f8b967ba50ce370a4c9b711689..7ab9682c2789ed11c4f066fa4b453e8c1212878b 100644 --- a/src/interdomain/service/__main__.py +++ b/src/interdomain/service/__main__.py @@ -18,8 +18,9 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables) -from interdomain.Config import is_topology_abstractor_enabled +from interdomain.Config import is_dlt_enabled, is_topology_abstractor_enabled from .topology_abstractor.TopologyAbstractor import TopologyAbstractor +from .topology_abstractor.DltRecorder import DLTRecorder from .InterdomainService import InterdomainService from .RemoteDomainClients import RemoteDomainClients @@ -69,12 +70,21 @@ def main(): topology_abstractor = TopologyAbstractor() topology_abstractor.start() + # Subscribe to Context Events + dlt_enabled = is_dlt_enabled() + if dlt_enabled: + LOGGER.info('Starting DLT functionality...') + dlt_recorder = DLTRecorder() + dlt_recorder.start() + # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') if topology_abstractor_enabled: 
topology_abstractor.stop() + if dlt_enabled: + dlt_recorder.stop() grpc_service.stop() remote_domain_clients.stop() diff --git a/src/interdomain/service/topology_abstractor/DltRecordSender.py b/src/interdomain/service/topology_abstractor/DltRecordSender.py index a504fe01b4e5755b880b76114c60d28f186f3f6f..5c53edc598ee57b856ebea3bd1eaf2e855b0cf6b 100644 --- a/src/interdomain/service/topology_abstractor/DltRecordSender.py +++ b/src/interdomain/service/topology_abstractor/DltRecordSender.py @@ -12,24 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging -from typing import Dict, List, Optional, Tuple +import asyncio, logging +from typing import Dict, List, Tuple from common.proto.context_pb2 import Device, Link, Service, Slice, TopologyId from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from context.client.ContextClient import ContextClient -from dlt.connector.client.DltConnectorClient import DltConnectorClient -from .Types import DltRecordTypes +from dlt.connector.client.DltConnectorClientAsync import DltConnectorClientAsync LOGGER = logging.getLogger(__name__) class DltRecordSender: - def __init__(self, context_client : ContextClient, dlt_connector_client : Optional[DltConnectorClient]) -> None: + def __init__(self, context_client : ContextClient) -> None: self.context_client = context_client - self.dlt_connector_client = dlt_connector_client + LOGGER.debug('Creating Servicer...') + self.dlt_connector_client = DltConnectorClientAsync() + LOGGER.debug('Servicer Created') self.dlt_record_uuids : List[str] = list() - self.dlt_record_uuid_to_data : Dict[str, Tuple[TopologyId, DltRecordTypes]] = dict() + self.dlt_record_uuid_to_data : Dict[str, Tuple[TopologyId, object]] = dict() - def _add_record(self, record_uuid : str, data : Tuple[TopologyId, DltRecordTypes]) -> None: + async def initialize(self): + await self.dlt_connector_client.connect() + + def _add_record(self, record_uuid : str, data : Tuple[TopologyId, object]) -> None: if record_uuid in self.dlt_record_uuid_to_data: return self.dlt_record_uuid_to_data[record_uuid] = data self.dlt_record_uuids.append(record_uuid) @@ -60,36 +64,45 @@ class DltRecordSender: record_uuid = '{:s}:slice:{:s}/{:s}'.format(topology_uuid, context_uuid, slice_uuid) self._add_record(record_uuid, (topology_id, slice_)) - def commit(self) -> None: + async def commit(self) -> None: + if not self.dlt_connector_client: + LOGGER.error('DLT Connector Client is None, cannot commit records.') + return + + tasks = [] # List to hold all the async tasks + for dlt_record_uuid in self.dlt_record_uuids: - topology_id,dlt_record = self.dlt_record_uuid_to_data[dlt_record_uuid] + topology_id, dlt_record = self.dlt_record_uuid_to_data[dlt_record_uuid] if isinstance(dlt_record, Device): - device_id = self.context_client.SetDevice(dlt_record) + device_id = dlt_record.device_id if self.dlt_connector_client is None: continue dlt_device_id = DltDeviceId() dlt_device_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member dlt_device_id.device_id.CopyFrom(device_id) # pylint: disable=no-member - self.dlt_connector_client.RecordDevice(dlt_device_id) + tasks.append(self.dlt_connector_client.RecordDevice(dlt_device_id)) elif isinstance(dlt_record, Link): - link_id = self.context_client.SetLink(dlt_record) + link_id = dlt_record.link_id if self.dlt_connector_client is None: continue dlt_link_id = DltLinkId() dlt_link_id.topology_id.CopyFrom(topology_id) # pylint: 
disable=no-member dlt_link_id.link_id.CopyFrom(link_id) # pylint: disable=no-member - self.dlt_connector_client.RecordLink(dlt_link_id) + tasks.append(self.dlt_connector_client.RecordLink(dlt_link_id)) elif isinstance(dlt_record, Service): - service_id = self.context_client.SetService(dlt_record) + service_id = dlt_record.service_id if self.dlt_connector_client is None: continue dlt_service_id = DltServiceId() dlt_service_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member dlt_service_id.service_id.CopyFrom(service_id) # pylint: disable=no-member - self.dlt_connector_client.RecordService(dlt_service_id) + tasks.append(self.dlt_connector_client.RecordService(dlt_service_id)) elif isinstance(dlt_record, Slice): - slice_id = self.context_client.SetSlice(dlt_record) + slice_id = dlt_record.slice_id if self.dlt_connector_client is None: continue dlt_slice_id = DltSliceId() dlt_slice_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member dlt_slice_id.slice_id.CopyFrom(slice_id) # pylint: disable=no-member - self.dlt_connector_client.RecordSlice(dlt_slice_id) + tasks.append(self.dlt_connector_client.RecordSlice(dlt_slice_id)) else: - LOGGER.error('Unsupported Record({:s})'.format(str(dlt_record))) + LOGGER.error(f'Unsupported Record({str(dlt_record)})') + + if tasks: + await asyncio.gather(*tasks) # Run all the tasks concurrently diff --git a/src/interdomain/service/topology_abstractor/DltRecorder.py b/src/interdomain/service/topology_abstractor/DltRecorder.py new file mode 100644 index 0000000000000000000000000000000000000000..22c436363b009810815b1cf3fa011fd5cbbc6a13 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/DltRecorder.py @@ -0,0 +1,206 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
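The reworked `DltRecordSender.commit()` above no longer issues one blocking RPC per record; it queues one coroutine per record and awaits them together with `asyncio.gather`. A minimal sketch of that fan-out pattern, using a stand-in client in place of the real `DltConnectorClientAsync`:

```python
import asyncio

class StubDltClient:
    """Stand-in for DltConnectorClientAsync; only illustrates the call shape."""
    async def record(self, record_id: str) -> str:
        await asyncio.sleep(0.01)  # simulates one async gRPC round-trip
        return f'recorded {record_id}'

async def commit(record_ids):
    client = StubDltClient()
    # Build the full task list first, then run every RPC concurrently.
    tasks = [client.record(record_id) for record_id in record_ids]
    return await asyncio.gather(*tasks)

print(asyncio.run(commit(['device:d1', 'link:l1', 'service:s1'])))
```

Building the task list before awaiting means the total commit latency approaches that of the slowest RPC rather than the sum of all of them.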
+ +import logging, threading, asyncio, time +from typing import Dict, Optional +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME +from common.proto.context_pb2 import ( + ContextEvent, ContextId, DeviceEvent, DeviceId, LinkId, LinkEvent, TopologyId, TopologyEvent +) +from common.tools.context_queries.Context import create_context +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from .DltRecordSender import DltRecordSender +from .Types import EventTypes + +LOGGER = logging.getLogger(__name__) + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) +INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id=ADMIN_CONTEXT_ID)) + + +class DLTRecorder(threading.Thread): + def __init__(self) -> None: + super().__init__(daemon=True) + self.terminate = threading.Event() + self.context_client = ContextClient() + self.context_event_collector = EventsCollector(self.context_client) + self.topology_cache: Dict[str, TopologyId] = {} + + # Queues for each event type + self.create_event_queue = asyncio.Queue() + self.update_event_queue = asyncio.Queue() + self.remove_event_queue = asyncio.Queue() + + def stop(self): + self.terminate.set() + + def run(self) -> None: + asyncio.run(self._run()) + + async def _run(self) -> None: + self.context_client.connect() + create_context(self.context_client, DEFAULT_CONTEXT_NAME) + #self.create_topologies() + self.context_event_collector.start() + + batch_timeout = 1 # Time in seconds to wait before processing whatever tasks are available + last_task_time = time.time() + + while not self.terminate.is_set(): + event = self.context_event_collector.get_event(timeout=0.1) + if event: + LOGGER.info('Received Event({:s})...'.format(grpc_message_to_json_string(event))) + + # Prioritize the event based on its type + if event.event.event_type == 1: # CREATE + await self.create_event_queue.put(event) + elif event.event.event_type == 2: # UPDATE + await self.update_event_queue.put(event) + elif event.event.event_type == 3: # REMOVE + await self.remove_event_queue.put(event) + + # Check if it's time to process the tasks or if we have enough tasks + current_time = time.time() + if current_time - last_task_time >= batch_timeout: + await self.process_events() + last_task_time = current_time # Reset the timer after processing + + self.context_event_collector.stop() + self.context_client.close() + + async def process_events(self): + # Process CREATE events first + await self.process_queue(self.create_event_queue) + # Then process UPDATE events + await self.process_queue(self.update_event_queue) + # Finally, process REMOVE events + await self.process_queue(self.remove_event_queue) + + async def process_queue(self, queue : asyncio.Queue): + tasks = [] + while not queue.empty(): + event = await queue.get() + LOGGER.info('Processing Event({:s}) from queue...'.format(grpc_message_to_json_string(event))) + task = asyncio.create_task(self.update_record(event)) + tasks.append(task) + + # Execute tasks concurrently + if tasks: + try: + await asyncio.gather(*tasks) + except Exception as e: + LOGGER.error(f"Error while processing tasks: {e}") + + async def update_record(self, event : EventTypes) -> None: + dlt_record_sender = 
DltRecordSender(self.context_client) + await dlt_record_sender.initialize() # Ensure DltRecordSender is initialized asynchronously + LOGGER.debug('STARTING processing event: {:s}'.format(grpc_message_to_json_string(event))) + + if isinstance(event, ContextEvent): + LOGGER.debug('Processing ContextEvent({:s})'.format(grpc_message_to_json_string(event))) + LOGGER.warning('Ignoring ContextEvent({:s})'.format(grpc_message_to_json_string(event))) + + elif isinstance(event, TopologyEvent): + LOGGER.debug('Processing TopologyEvent({:s})'.format(grpc_message_to_json_string(event))) + self.process_topology_event(event, dlt_record_sender) + + elif isinstance(event, DeviceEvent): + LOGGER.debug('Processing DeviceEvent ASYNC({:s})'.format(grpc_message_to_json_string(event))) + self.process_device_event(event, dlt_record_sender) + + elif isinstance(event, LinkEvent): + LOGGER.debug('Processing LinkEvent({:s})'.format(grpc_message_to_json_string(event))) + self.process_link_event(event, dlt_record_sender) + + else: + LOGGER.warning('Unsupported Event({:s})'.format(grpc_message_to_json_string(event))) + + await dlt_record_sender.commit() + #await asyncio.sleep(2) # Simulates processing time + LOGGER.debug('Finished processing event: {:s}'.format(grpc_message_to_json_string(event))) + + + def process_topology_event(self, event : TopologyEvent, dlt_record_sender : DltRecordSender) -> None: + topology_id = event.topology_id + topology_uuid = topology_id.topology_uuid.uuid + context_id = topology_id.context_id + context_uuid = context_id.context_uuid.uuid + topology_uuids = {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME} + + context = self.context_client.GetContext(context_id) + context_name = context.name + + topology_details = self.context_client.GetTopologyDetails(topology_id) + topology_name = topology_details.name + + self.topology_cache[topology_uuid] = topology_id + + LOGGER.debug('TOPOLOGY Details({:s})'.format(grpc_message_to_json_string(topology_details))) + + if ((context_uuid == DEFAULT_CONTEXT_NAME) or (context_name == DEFAULT_CONTEXT_NAME)) and \ + (topology_uuid not in topology_uuids) and (topology_name not in topology_uuids): + LOGGER.debug('DEVICES({:s})'.format(grpc_message_to_json_string(topology_details.devices))) + for device in topology_details.devices: + LOGGER.debug('DEVICE_INFO_TOPO({:s})'.format(grpc_message_to_json_string(device))) + dlt_record_sender.add_device(topology_id, device) + + for link in topology_details.links: + dlt_record_sender.add_link(topology_id, link) + + else: + MSG = 'Ignoring ({:s}/{:s})({:s}/{:s}) TopologyEvent({:s})' + args = context_uuid, context_name, topology_uuid, topology_name, grpc_message_to_json_string(event) + LOGGER.warning(MSG.format(*args)) + + def find_topology_for_device(self, device_id : DeviceId) -> Optional[TopologyId]: + for topology_uuid, topology_id in self.topology_cache.items(): + details = self.context_client.GetTopologyDetails(topology_id) + for device in details.devices: + if device.device_id == device_id: + return topology_id + return None + + def find_topology_for_link(self, link_id : LinkId) -> Optional[TopologyId]: + for topology_uuid, topology_id in self.topology_cache.items(): + details = self.context_client.GetTopologyDetails(topology_id) + for link in details.links: + if link.link_id == link_id: + return topology_id + return None + + def process_device_event(self, event : DeviceEvent, dlt_record_sender : DltRecordSender) -> None: + device_id = event.device_id + device = self.context_client.GetDevice(device_id) + topology_id 
= self.find_topology_for_device(device_id) + if topology_id: + LOGGER.debug('DEVICE_INFO({:s}), DEVICE_ID ({:s})'.format( + str(device.device_id.device_uuid.uuid), + grpc_message_to_json_string(device_id) + )) + dlt_record_sender.add_device(topology_id, device) + else: + LOGGER.warning("Topology not found for device {:s}".format(str(device_id.device_uuid.uuid))) + + def process_link_event(self, event: LinkEvent, dlt_record_sender: DltRecordSender) -> None: + link_id = event.link_id + link = self.context_client.GetLink(link_id) + topology_id = self.find_topology_for_link(link_id) + if topology_id: + dlt_record_sender.add_link(topology_id, link) + else: + LOGGER.warning("Topology not found for link {:s}".format(str(link_id.link_uuid.uuid))) diff --git a/src/kpi_manager/.gitlab-ci.yml b/src/kpi_manager/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..498cfd89fb3da85fec1b2ad0c930408eab215dc5 --- /dev/null +++ b/src/kpi_manager/.gitlab-ci.yml @@ -0,0 +1,131 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build kpi-manager: + variables: + IMAGE_NAME: 'kpi_manager' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile . 
+ - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + +# Apply unit test to the component +unit_test kpi-manager: + variables: + IMAGE_NAME: 'kpi_manager' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build kpi-manager + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi + - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi + - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker pull "cockroachdb/cockroach:latest-v22.2" + - docker volume create crdb + - > + docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 + --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 + --volume "crdb:/cockroach/cockroach-data" + cockroachdb/cockroach:latest-v22.2 start-single-node + - echo "Waiting for initialization..." + - while ! 
docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done + - docker logs crdb + - docker ps -a + - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $CRDB_ADDRESS + - > + docker run --name $IMAGE_NAME -d -p 30010:30010 + --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - docker ps -a + - sleep 5 + - docker logs $IMAGE_NAME + - > + docker exec -i $IMAGE_NAME bash -c + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py" + - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + - docker volume rm -f crdb + - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + - src/$IMAGE_NAME/tests/Dockerfile + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml + +## Deployment of the service in Kubernetes Cluster +#deploy context: +# variables: +# IMAGE_NAME: 'context' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test context +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/kpi_manager/Dockerfile b/src/kpi_manager/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..a57957759a32b45b715e327b54ebe004a6edf265 --- /dev/null +++ b/src/kpi_manager/Dockerfile @@ -0,0 +1,68 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/kpi_manager +WORKDIR /var/teraflow/kpi_manager +COPY src/kpi_manager/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/kpi_manager/. kpi_manager/ + +# Start the service +ENTRYPOINT ["python", "-m", "kpi_manager.service"] diff --git a/src/kpi_manager/README.md b/src/kpi_manager/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6e9b56d9349aa6acd5c41004e32c933619a37f65 --- /dev/null +++ b/src/kpi_manager/README.md @@ -0,0 +1,24 @@ +# How to locally run and test the KPI Manager micro-service + +### Prerequisites +Ensure the following requirements are met before executing the KPI management service: + +1. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/requirements.in) successfully installed. +2. Verify the creation of the required database and table. The +[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_db.py) python file lists the functions to create tables and the database. The +[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/database/KpiEngine.py) file contains the DB string. + +### Messages format templates +The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_messages.py) python file contains templates for creating gRPC messages. + +### Unit test file +The ["KPI manager test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_manager.py) python file lists the various tests conducted to validate functionality. + +### Flow of execution (KPI Manager Service functions) +1.
Call the `create_database()` and `create_tables()` functions from the `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of the KPI table. + +2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor to the `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types. + +3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from the DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from the DB. + +4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that match the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types. A usage sketch of this flow is shown below. diff --git a/src/kpi_manager/__init__.py b/src/kpi_manager/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/kpi_manager/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/kpi_manager/client/KpiManagerClient.py b/src/kpi_manager/client/KpiManagerClient.py new file mode 100755 index 0000000000000000000000000000000000000000..672d82f2d78ea8b477429c5ba03fbb4331bae7c7 --- /dev/null +++ b/src/kpi_manager/client/KpiManagerClient.py @@ -0,0 +1,77 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
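A minimal usage sketch of the flow above, assuming a KPI Manager instance is deployed and reachable through the settings-resolved host/port (the descriptor field values are illustrative, not prescribed):

```python
from common.proto.kpi_manager_pb2 import KpiDescriptor
from kpi_manager.client.KpiManagerClient import KpiManagerClient

client = KpiManagerClient()                   # resolves host/port from TFS settings

descriptor = KpiDescriptor()
descriptor.kpi_description = 'example KPI'    # illustrative values only
descriptor.kpi_sample_type = 101

kpi_id = client.SetKpiDescriptor(descriptor)  # store it, get a KpiId back
fetched = client.GetKpiDescriptor(kpi_id)     # read the descriptor back
client.DeleteKpiDescriptor(kpi_id)            # remove it again
client.close()
```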
+ +import grpc, logging +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc + +from common.proto.context_pb2 import Empty +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceStub +from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 10 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class KpiManagerClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.KPIMANAGER) + if not port: port = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) + + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = KpiManagerServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def SetKpiDescriptor(self, request : KpiDescriptor) -> KpiId: + LOGGER.debug('SetKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SetKpiDescriptor(request) + LOGGER.debug('SetKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def DeleteKpiDescriptor(self,request : KpiId) -> Empty: + LOGGER.debug('DeleteKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.DeleteKpiDescriptor(request) + LOGGER.debug('DeleteKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def GetKpiDescriptor(self, request : KpiId) -> KpiDescriptor: + LOGGER.debug('GetKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.GetKpiDescriptor(request) + LOGGER.debug('GetKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SelectKpiDescriptor(self, filter : KpiDescriptorFilter) -> KpiDescriptorList: + LOGGER.debug('SelectKpiDescriptor: {:s}'.format(grpc_message_to_json_string(filter))) + response = self.stub.SelectKpiDescriptor(filter) + LOGGER.debug('SelectKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/kpi_manager/client/__init__.py b/src/kpi_manager/client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..48f7d354a2f3fe6e91bb79b3ca956f68c36ed9e3 --- /dev/null +++ b/src/kpi_manager/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/kpi_manager/database/KpiEngine.py b/src/kpi_manager/database/KpiEngine.py new file mode 100644 index 0000000000000000000000000000000000000000..0fce7e3d36cf2f03a18f311c815719a4f17b2869 --- /dev/null +++ b/src/kpi_manager/database/KpiEngine.py @@ -0,0 +1,40 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, sqlalchemy +from common.Settings import get_setting + +LOGGER = logging.getLogger(__name__) +CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' + +class KpiEngine: + @staticmethod + def get_engine() -> sqlalchemy.engine.Engine: + crdb_uri = get_setting('CRDB_URI', default=None) + if crdb_uri is None: + CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE') + CRDB_SQL_PORT = get_setting('CRDB_SQL_PORT') + CRDB_DATABASE = 'tfs_kpi_mgmt' # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT') + CRDB_USERNAME = get_setting('CRDB_USERNAME') + CRDB_PASSWORD = get_setting('CRDB_PASSWORD') + CRDB_SSLMODE = get_setting('CRDB_SSLMODE') + crdb_uri = CRDB_URI_TEMPLATE.format( + CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) + try: + engine = sqlalchemy.create_engine(crdb_uri, echo=False) + LOGGER.info('KpiDBmanager initialized with DB URL: {:}'.format(crdb_uri)) + except: # pylint: disable=bare-except # pragma: no cover + LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) + return None # type: ignore + return engine diff --git a/src/kpi_manager/database/KpiModel.py b/src/kpi_manager/database/KpiModel.py new file mode 100644 index 0000000000000000000000000000000000000000..5c2fdff0664883bcc727096ddeda562fdbe3085d --- /dev/null +++ b/src/kpi_manager/database/KpiModel.py @@ -0,0 +1,84 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
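The engine returned by `KpiEngine.get_engine()` is meant to be consumed through a SQLAlchemy session factory, as the `Kpi_DB` class further below does. A minimal session-lifecycle sketch (an in-memory SQLite engine stands in for CockroachDB so the snippet is self-contained):

```python
import sqlalchemy
from sqlalchemy.orm import sessionmaker

# SQLite replaces the CockroachDB URI purely for illustration.
engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=False)
Session = sessionmaker(bind=engine)

session = Session()
try:
    session.execute(sqlalchemy.text('SELECT 1'))  # any unit of work goes here
    session.commit()                              # persist on success
except Exception:
    session.rollback()                            # undo on failure
    raise
finally:
    session.close()                               # always release the connection
```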
+
+import logging
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy import Column, Integer, String, Text
+from sqlalchemy.orm import registry
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+
+class Kpi(Base):
+    __tablename__ = 'kpi'
+
+    kpi_id          = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_description = Column(Text                , nullable=False)
+    kpi_sample_type = Column(Integer             , nullable=False)
+    device_id       = Column(String              , nullable=False)
+    endpoint_id     = Column(String              , nullable=False)
+    service_id      = Column(String              , nullable=False)
+    slice_id        = Column(String              , nullable=False)
+    connection_id   = Column(String              , nullable=False)
+    link_id         = Column(String              , nullable=False)
+
+    # Helps in logging the information
+    def __repr__(self):
+        return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', "
+                f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', "
+                f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', "
+                f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', "
+                f"link_id='{self.link_id}')>")
+
+    @classmethod
+    def convert_KpiDescriptor_to_row(cls, request):
+        """
+        Create an instance of Kpi from a request object.
+        Args:    request: The request object containing the data.
+        Returns: An instance of Kpi initialized with data from the request.
+        """
+        return cls(
+            kpi_id          = request.kpi_id.kpi_id.uuid,
+            kpi_description = request.kpi_description,
+            kpi_sample_type = request.kpi_sample_type,
+            device_id       = request.device_id.device_uuid.uuid,
+            endpoint_id     = request.endpoint_id.endpoint_uuid.uuid,
+            service_id      = request.service_id.service_uuid.uuid,
+            slice_id        = request.slice_id.slice_uuid.uuid,
+            connection_id   = request.connection_id.connection_uuid.uuid,
+            link_id         = request.link_id.link_uuid.uuid
+        )
+
+    @classmethod
+    def convert_row_to_KpiDescriptor(cls, row):
+        """
+        Create and return a KpiDescriptor object from a Kpi instance (row).
+        Args:    row: The Kpi instance (row) containing the data.
+        Returns: KpiDescriptor object
+        """
+        response = KpiDescriptor()
+        response.kpi_id.kpi_id.uuid                 = row.kpi_id
+        response.kpi_description                    = row.kpi_description
+        response.kpi_sample_type                    = row.kpi_sample_type
+        response.service_id.service_uuid.uuid       = row.service_id
+        response.device_id.device_uuid.uuid         = row.device_id
+        response.slice_id.slice_uuid.uuid           = row.slice_id
+        response.endpoint_id.endpoint_uuid.uuid     = row.endpoint_id
+        response.connection_id.connection_uuid.uuid = row.connection_id
+        response.link_id.link_uuid.uuid             = row.link_id
+        return response
diff --git a/src/kpi_manager/database/Kpi_DB.py b/src/kpi_manager/database/Kpi_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..49ad9c9b579daa918818366a1d9505089968edc2
--- /dev/null
+++ b/src/kpi_manager/database/Kpi_DB.py
@@ -0,0 +1,154 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
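Editor's note: as a quick illustration of the two conversion classmethods above, this sketch round-trips a KpiDescriptor through the ORM row and back. It is illustrative only; the converters dereference every `*_id` sub-message unconditionally, and in proto3 unset sub-messages simply yield empty-string UUIDs.

```python
# Illustrative round-trip through the Kpi ORM model (not part of this patch).
import uuid
from common.proto.kpi_manager_pb2 import KpiDescriptor
from kpi_manager.database.KpiModel import Kpi

request = KpiDescriptor()
request.kpi_id.kpi_id.uuid = str(uuid.uuid4())
request.kpi_description    = 'demo'
# Unset *_id sub-messages dereference to '' rather than raising.
row = Kpi.convert_KpiDescriptor_to_row(request)

restored = Kpi.convert_row_to_KpiDescriptor(row)
assert restored.kpi_id.kpi_id.uuid == request.kpi_id.kpi_id.uuid
```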
+
+import logging
+import sqlalchemy_utils
+from sqlalchemy.orm import sessionmaker
+from kpi_manager.database.KpiEngine import KpiEngine
+from kpi_manager.database.KpiModel import Kpi as KpiModel
+from common.method_wrappers.ServiceExceptions import (
+    AlreadyExistsException, OperationFailedException, NotFoundException)
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "tfs_kpi_mgmt"
+
+class KpiDB:
+    def __init__(self):
+        self.db_engine = KpiEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            # __init__ cannot return a value; fail loudly instead of returning False
+            raise RuntimeError('Unable to get SQLAlchemy DB Engine')
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self) -> None:
+        if not sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.create_database(self.db_engine.url)
+            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
+
+    def drop_database(self) -> None:
+        if sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.drop_database(self.db_engine.url)
+
+    def create_tables(self):
+        # TODO: use "get_tables(declarative class obj)" method of "sqlalchemy_utils" to verify tables.
+        try:
+            KpiModel.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.debug("Tables created in the DB Name: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.debug("Tables cannot be created in the kpi database. {:s}".format(str(e)))
+            raise OperationFailedException("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
+
+    def verify_tables(self):
+        try:
+            with self.db_engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()      # type: ignore
+                LOGGER.debug("Tables verified: {:}".format(tables))
+        except Exception as e:
+            LOGGER.debug("Unable to fetch Table names. {:s}".format(str(e)))
+
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
+            return True
+        except Exception as e:
+            session.rollback()
+            if "psycopg2.errors.UniqueViolation" in str(e):
+                LOGGER.error(f"Unique key violation: {row.__class__.__name__} table. {str(e)}")
+                raise AlreadyExistsException(row.__class__.__name__, row, extra_details=["Unique key violation: {:}".format(e)])
+            else:
+                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+                raise OperationFailedException("Insertion of new row", extra_details=["unable to insert row {:}".format(e)])
+        finally:
+            session.close()
+
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}")
+                print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
+                return None
+        except Exception as e:
+            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. 
{str(e)}") + raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)]) + finally: + session.close() + + def delete_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + record = session.query(model).filter_by(**{col_name: id_to_search}).first() + if record: + session.delete(record) + session.commit() + LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search) + else: + LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search) + return None + except Exception as e: + session.rollback() + LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e) + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def select_with_filter(self, model, filter_object): + session = self.Session() + try: + query = session.query(KpiModel) + # Apply filters based on the filter_object + if filter_object.kpi_id: + query = query.filter(KpiModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id])) + + if filter_object.kpi_sample_type: + query = query.filter(KpiModel.kpi_sample_type.in_(filter_object.kpi_sample_type)) + + if filter_object.device_id: + query = query.filter(KpiModel.device_id.in_([d.device_uuid.uuid for d in filter_object.device_id])) + + if filter_object.endpoint_id: + query = query.filter(KpiModel.endpoint_id.in_([e.endpoint_uuid.uuid for e in filter_object.endpoint_id])) + + if filter_object.service_id: + query = query.filter(KpiModel.service_id.in_([s.service_uuid.uuid for s in filter_object.service_id])) + + if filter_object.slice_id: + query = query.filter(KpiModel.slice_id.in_([s.slice_uuid.uuid for s in filter_object.slice_id])) + + if filter_object.connection_id: + query = query.filter(KpiModel.connection_id.in_([c.connection_uuid.uuid for c in filter_object.connection_id])) + + if filter_object.link_id: + query = query.filter(KpiModel.link_id.in_([l.link_uuid.uuid for l in filter_object.link_id])) + result = query.all() + + if result: + LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") # - Results: {result} + else: + LOGGER.debug(f"No matching row found in {model.__name__} table with filters: {filter_object}") + return result + except Exception as e: + LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}") + raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)]) + finally: + session.close() diff --git a/src/kpi_manager/database/__init__.py b/src/kpi_manager/database/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/kpi_manager/database/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/kpi_manager/requirements.in b/src/kpi_manager/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..3e98fef362277dbf60019902e115d1c733bea9e7 --- /dev/null +++ b/src/kpi_manager/requirements.in @@ -0,0 +1,18 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* diff --git a/src/kpi_manager/service/KpiManagerService.py b/src/kpi_manager/service/KpiManagerService.py new file mode 100755 index 0000000000000000000000000000000000000000..b69a926a94c6cf10a680fe1b15d065f6bc073c97 --- /dev/null +++ b/src/kpi_manager/service/KpiManagerService.py @@ -0,0 +1,29 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.service.GenericGrpcService import GenericGrpcService +from common.proto.kpi_manager_pb2_grpc import add_KpiManagerServiceServicer_to_server +from kpi_manager.service.KpiManagerServiceServicerImpl import KpiManagerServiceServicerImpl + + +class KpiManagerService(GenericGrpcService): + def __init__(self, cls_name: str = __name__) -> None: + port = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) + super().__init__(port, cls_name=cls_name) + self.kpiManagerService_servicer = KpiManagerServiceServicerImpl() + + def install_servicers(self): + add_KpiManagerServiceServicer_to_server(self.kpiManagerService_servicer, self.server) diff --git a/src/kpi_manager/service/KpiManagerServiceServicerImpl.py b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..fd22474829ea0dfb6b1a25e70bbb4d5440c0216b --- /dev/null +++ b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py @@ -0,0 +1,94 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, grpc
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.proto.context_pb2 import Empty
+from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceServicer
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
+from kpi_manager.database.Kpi_DB import KpiDB
+from kpi_manager.database.KpiModel import Kpi as KpiModel
+
+LOGGER = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('KpiManager', 'NBIgRPC')
+
+class KpiManagerServiceServicerImpl(KpiManagerServiceServicer):
+    def __init__(self):
+        LOGGER.info('Init KpiManagerService')
+        self.kpi_db_obj = KpiDB()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetKpiDescriptor(self, request: KpiDescriptor, grpc_context: grpc.ServicerContext # type: ignore
+                        ) -> KpiId: # type: ignore
+        response = KpiId()
+        LOGGER.info("Received gRPC message object: {:}".format(request))
+        try:
+            kpi_to_insert = KpiModel.convert_KpiDescriptor_to_row(request)
+            if self.kpi_db_obj.add_row_to_db(kpi_to_insert):
+                response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+                # LOGGER.info("Added Row: {:}".format(response))
+            return response
+        except Exception as e:
+            LOGGER.info("Unable to create KpiModel class object. {:}".format(e))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext # type: ignore
+                        ) -> KpiDescriptor: # type: ignore
+        response = KpiDescriptor()
+        print("--> Received gRPC message object: {:}".format(request))
+        LOGGER.info("Received gRPC message object: {:}".format(request))
+        try:
+            kpi_id_to_search = request.kpi_id.uuid
+            row = self.kpi_db_obj.search_db_row_by_id(KpiModel, 'kpi_id', kpi_id_to_search)
+            if row is None:
+                print('No matching row found for kpi id: {:}'.format(kpi_id_to_search))
+                LOGGER.info('No matching row found for kpi id: {:}'.format(kpi_id_to_search))
+                return Empty()
+            else:
+                response = KpiModel.convert_row_to_KpiDescriptor(row)
+                return response
+        except Exception as e:
+            print('Unable to search kpi id. {:}'.format(e))
+            LOGGER.info('Unable to search kpi id. {:}'.format(e))
+            raise e
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def DeleteKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext # type: ignore
+                           ) -> Empty: # type: ignore
+        LOGGER.info("Received gRPC message object: {:}".format(request))
+        try:
+            kpi_id_to_search = request.kpi_id.uuid
+            self.kpi_db_obj.delete_db_row_by_id(KpiModel, 'kpi_id', kpi_id_to_search)
+        except Exception as e:
+            LOGGER.info('Unable to search kpi id. {:}'.format(e))
+        finally:
+            return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectKpiDescriptor(self, filter: KpiDescriptorFilter, grpc_context: grpc.ServicerContext # type: ignore
+                           ) -> KpiDescriptorList: # type: ignore
+        LOGGER.info("Received gRPC message object: {:}".format(filter))
+        response = KpiDescriptorList()
+        try:
+            rows = self.kpi_db_obj.select_with_filter(KpiModel, filter)
+        except Exception as e:
+            LOGGER.info('Unable to apply filter on kpi descriptor. {:}'.format(e))
+            return response   # 'rows' is undefined if the select failed; return the empty list
+        try:
+            for row in rows:
+                kpiDescriptor_obj = KpiModel.convert_row_to_KpiDescriptor(row)
+                response.kpi_descriptor_list.append(kpiDescriptor_obj)
+            return response
+        except Exception as e:
+            LOGGER.info('Unable to process filter response {:}'.format(e))
diff --git a/src/kpi_manager/service/__init__.py b/src/kpi_manager/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/service/__main__.py b/src/kpi_manager/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..244d5afa373a6462a0382a0ed26a588088a689a1
--- /dev/null
+++ b/src/kpi_manager/service/__main__.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from common.Settings import get_log_level
+from .KpiManagerService import KpiManagerService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.debug('Starting...')
+
+    grpc_service = KpiManagerService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.debug('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.debug('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_manager/tests/test_kpi_db.py b/src/kpi_manager/tests/test_kpi_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4a57f83664f851504389b3bbe99d5c2a92542d9
--- /dev/null
+++ b/src/kpi_manager/tests/test_kpi_db.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from kpi_manager.database.Kpi_DB import KpiDB + +LOGGER = logging.getLogger(__name__) + +def test_verify_databases_and_Tables(): + LOGGER.info('>>> test_verify_Tables : START <<< ') + kpiDBobj = KpiDB() + # kpiDBobj.drop_database() + # kpiDBobj.verify_tables() + kpiDBobj.create_database() + kpiDBobj.create_tables() + kpiDBobj.verify_tables() diff --git a/src/kpi_manager/tests/test_kpi_manager.py b/src/kpi_manager/tests/test_kpi_manager.py new file mode 100755 index 0000000000000000000000000000000000000000..219fdadee9e2f4ca9ea9ac0be040043d4edfbdbe --- /dev/null +++ b/src/kpi_manager/tests/test_kpi_manager.py @@ -0,0 +1,147 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os, pytest +import logging +from typing import Union + +from common.proto.context_pb2 import Empty +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc) +from common.tests.MockServicerImpl_Context import MockServicerImpl_Context +from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server + +from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList +from common.tools.service.GenericGrpcService import GenericGrpcService + +from kpi_manager.tests.test_messages import create_kpi_descriptor_request, create_kpi_filter_request, create_kpi_descriptor_request_a +from kpi_manager.service.KpiManagerService import KpiManagerService +from kpi_manager.client.KpiManagerClient import KpiManagerClient +from kpi_manager.tests.test_messages import create_kpi_descriptor_request +from kpi_manager.tests.test_messages import create_kpi_id_request + +########################### +# Tests Setup +########################### + +LOCAL_HOST = '127.0.0.1' + +KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) # type: ignore +os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT) + +LOGGER = logging.getLogger(__name__) + +class MockContextService(GenericGrpcService): + # Mock Service implementing Context to simplify unitary tests of Monitoring + + def __init__(self, bind_port: Union[str, int]) -> None: + super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') + + # pylint: disable=attribute-defined-outside-init + def 
install_servicers(self): + self.context_servicer = MockServicerImpl_Context() + add_ContextServiceServicer_to_server(self.context_servicer, self.server) + +# This fixture will be requested by test cases and last during testing session +@pytest.fixture(scope='session') +def kpi_manager_service(): + LOGGER.info('Initializing KpiManagerService...') + _service = KpiManagerService() + _service.start() + + # yield the server, when test finishes, execution will resume to stop it + LOGGER.info('Yielding KpiManagerService...') + yield _service + + LOGGER.info('Terminating KpiManagerService...') + _service.stop() + + LOGGER.info('Terminated KpiManagerService...') + +# This fixture will be requested by test cases and last during testing session. +# The client requires the server, so client fixture has the server as dependency. +# def monitoring_client(monitoring_service : MonitoringService): (Add for better understanding) +@pytest.fixture(scope='session') +def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disable=redefined-outer-name,unused-argument + LOGGER.info('Initializing KpiManagerClient...') + _client = KpiManagerClient() + + # yield the server, when test finishes, execution will resume to stop it + LOGGER.info('Yielding KpiManagerClient...') + yield _client + + LOGGER.info('Closing KpiManagerClient...') + _client.close() + + LOGGER.info('Closed KpiManagerClient...') + +################################################## +# Prepare Environment, should be the first test +################################################## + + +########################### +# Tests Implementation of Kpi Manager +########################### + +# ---------- 3rd Iteration Tests ---------------- +def test_SetKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") + response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) + +def test_DeleteKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ") + # adding KPI + response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # deleting KPI + del_response = kpi_manager_client.DeleteKpiDescriptor(response_id) + # select KPI + kpi_manager_client.GetKpiDescriptor(response_id) + LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response)) + assert isinstance(del_response, Empty) + +def test_GetKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") + # adding KPI + response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # get KPI + response = kpi_manager_client.GetKpiDescriptor(response_id) + LOGGER.info("Response gRPC message object: {:}".format(response)) + + LOGGER.info(" >>> calling GetKpiDescriptor with random ID") + rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request()) + LOGGER.info("Response gRPC message object: {:}".format(rand_response)) + + assert isinstance(response, KpiDescriptor) + +def test_SelectKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ") + # adding KPI + kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # select KPI(s) + response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request()) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiDescriptorList) + +def 
test_set_list_of_KPIs(kpi_manager_client): + LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ") + KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"] + # adding KPI + for kpi in KPIs_TO_SEARCH: + kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi)) diff --git a/src/kpi_manager/tests/test_messages.py b/src/kpi_manager/tests/test_messages.py new file mode 100644 index 0000000000000000000000000000000000000000..7b5c45859b6c10056211f9f33df950d9668c11ea --- /dev/null +++ b/src/kpi_manager/tests/test_messages.py @@ -0,0 +1,78 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.proto.context_pb2 import DeviceId, LinkId, ServiceId, SliceId,\ + ConnectionId, EndPointId + + +def create_kpi_id_request(): + _create_kpi_id = kpi_manager_pb2.KpiId() + _create_kpi_id.kpi_id.uuid = str(uuid.uuid4()) + return _create_kpi_id + +def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED + _create_kpi_request.device_id.device_uuid.uuid = 'DEV2' + _create_kpi_request.service_id.service_uuid.uuid = 'SERV2' + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END1' + _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' + _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' + return _create_kpi_request + +def create_kpi_descriptor_request_a(description: str = "Test Description"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_kpi_request.kpi_description = description + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED + _create_kpi_request.device_id.device_uuid.uuid = 'DEV4' + _create_kpi_request.service_id.service_uuid.uuid = 'SERV3' + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC3' + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END2' + _create_kpi_request.connection_id.connection_uuid.uuid = 'CON2' + _create_kpi_request.link_id.link_uuid.uuid = 'LNK2' + return _create_kpi_request + +def create_kpi_filter_request(): + _create_kpi_filter_request = kpi_manager_pb2.KpiDescriptorFilter() + _create_kpi_filter_request.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED) + + device_id_obj = DeviceId() + service_id_obj = ServiceId() + slice_id_obj = SliceId() + endpoint_id_obj = EndPointId() + connection_id_obj = ConnectionId() + link_id_obj = LinkId() + + device_id_obj.device_uuid.uuid = "DEV2" + service_id_obj.service_uuid.uuid = "SERV2" + slice_id_obj.slice_uuid.uuid = "SLC1" + 
endpoint_id_obj.endpoint_uuid.uuid = "END1" + connection_id_obj.connection_uuid.uuid = "CON1" + link_id_obj.link_uuid.uuid = "LNK1" + + _create_kpi_filter_request.device_id.append(device_id_obj) + _create_kpi_filter_request.service_id.append(service_id_obj) + _create_kpi_filter_request.slice_id.append(slice_id_obj) + _create_kpi_filter_request.endpoint_id.append(endpoint_id_obj) + _create_kpi_filter_request.connection_id.append(connection_id_obj) + _create_kpi_filter_request.link_id.append(link_id_obj) + + return _create_kpi_filter_request \ No newline at end of file diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..1a6f821ba9e798bb4220d914109ab3a65f0f1792 --- /dev/null +++ b/src/kpi_value_api/.gitlab-ci.yml @@ -0,0 +1,129 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build kpi-value-api: + variables: + IMAGE_NAME: 'kpi_value_api' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile . 
+ - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + +# Apply unit test to the component +unit_test kpi-value-api: + variables: + IMAGE_NAME: 'kpi_value_api' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build kpi-value-api + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME -d -p 30020:30020 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - sleep 5 + - docker ps -a + - docker logs $IMAGE_NAME + - > + docker exec -i $IMAGE_NAME bash -c + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py" + - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + - docker rm -f $IMAGE_NAME + - docker network rm teraflowbridge + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + # - src/$IMAGE_NAME/tests/Dockerfile # mayne not needed + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: 
src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml + +## Deployment of the service in Kubernetes Cluster +#deploy context: +# variables: +# IMAGE_NAME: 'context' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test context +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/kpi_value_api/Dockerfile b/src/kpi_value_api/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..25b8da931f88000dd229c536456a3eb1fa7f56db --- /dev/null +++ b/src/kpi_value_api/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . 
\1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_value_api
+WORKDIR /var/teraflow/kpi_value_api
+COPY src/kpi_value_api/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_value_api/. kpi_value_api/
+COPY src/kpi_manager/__init__.py kpi_manager/__init__.py
+COPY src/kpi_manager/client/. kpi_manager/client/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_value_api.service"]
diff --git a/src/kpi_value_api/README.md b/src/kpi_value_api/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..70ba2c5e79c79147e336307ecc6d5ddfc263df90
--- /dev/null
+++ b/src/kpi_value_api/README.md
@@ -0,0 +1,23 @@
+# How to locally run and test KPI Value API micro-service
+
+### Prerequisites
+Ensure the following requirements are met before executing the KPI Value API service.
+
+1. The KPI Manager service and Apache Kafka are running.
+
+2. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/requirements.in) successfully installed.
+
+3. Call the ["create_all_topics()"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/common/tools/kafka/Variables.py) function to verify the existence of all required topics on Kafka.
+
+### Messages format templates
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/messages.py) python file contains templates for creating gRPC messages.
+
+### Unit test file
+The ["KPI Value API test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/test_kpi_value_api.py) python file lists the various tests conducted to validate functionality.
+
+### Flow of execution (KPI Value API Service functions)
+1. Call the `create_new_topic_if_not_exists(<list of string>)` method to create any new topics if needed.
+
+2. Call `StoreKpiValues(KpiValueList)` to produce `Kpi Value` on a Kafka Topic. (The `KpiValueWriter` microservice will consume and process the `Kpi Value`)
+
+3. Call `SelectKpiValues(KpiValueFilter) -> KpiValueList` to read metrics from the Prometheus DB.
diff --git a/src/kpi_value_api/__init__.py b/src/kpi_value_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_api/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
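Editor's note: to make the README's flow of execution concrete, here is a hypothetical end-to-end sketch under the stated prerequisites (Kafka and the KPI Manager running). It is not part of this patch; it uses the `KpiValueApiClient` defined just below and the `create_kpi_value_list()` helper from the tests.

```python
# Hypothetical walk-through of the README flow (not part of this patch).
from common.tools.kafka.Variables import KafkaTopic
from kpi_value_api.client.KpiValueApiClient import KpiValueApiClient  # defined below
from kpi_value_api.tests.messages import create_kpi_value_list

# Step 1: make sure the required Kafka topics exist.
assert KafkaTopic.create_all_topics()

# Step 2: produce KPI values to Kafka; KpiValueWriter consumes them downstream.
client = KpiValueApiClient()   # host/port resolved from the TFS env variables
client.StoreKpiValues(create_kpi_value_list())
client.close()
```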
+ diff --git a/src/kpi_value_api/client/KpiValueApiClient.py b/src/kpi_value_api/client/KpiValueApiClient.py new file mode 100644 index 0000000000000000000000000000000000000000..f432271cfb7c8136f72156330b25d0b82b934d99 --- /dev/null +++ b/src/kpi_value_api/client/KpiValueApiClient.py @@ -0,0 +1,63 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string + +from common.proto.context_pb2 import Empty +from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter +from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceStub + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 10 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class KpiValueApiClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.KPIVALUEAPI) + if not port: port = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = KpiValueAPIServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def StoreKpiValues(self, request: KpiValueList) -> Empty: + LOGGER.debug('StoreKpiValues: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.StoreKpiValues(request) + LOGGER.debug('StoreKpiValues result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SelectKpiValues(self, request: KpiValueFilter) -> KpiValueList: + LOGGER.debug('SelectKpiValues: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SelectKpiValues(request) + LOGGER.debug('SelectKpiValues result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/kpi_value_api/client/__init__.py b/src/kpi_value_api/client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/kpi_value_api/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/kpi_value_api/requirements.in b/src/kpi_value_api/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..f5695906a8d02d55e15960a76986b8d03f02dba1 --- /dev/null +++ b/src/kpi_value_api/requirements.in @@ -0,0 +1,17 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +confluent-kafka==2.3.* +requests==2.27.* +prometheus-api-client==0.5.3 \ No newline at end of file diff --git a/src/kpi_value_api/service/KpiValueApiService.py b/src/kpi_value_api/service/KpiValueApiService.py new file mode 100644 index 0000000000000000000000000000000000000000..68b6fbdc278a00aa7cf98385bcf8afa573f91445 --- /dev/null +++ b/src/kpi_value_api/service/KpiValueApiService.py @@ -0,0 +1,30 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
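Editor's note: completing the flow, step 3 reads values back from Prometheus through `SelectKpiValues`. A hypothetical query might look like the sketch below; the `KpiValueFilter` field layout is inferred from how the servicer consumes it later in this patch rather than from the .proto file, and the servicer's use of `parse_datetime` suggests the start/end timestamps are plain strings. Note also that the servicer dereferences filter entries one level deeper (`kpi.kpi_id.kpi_id.uuid`), which may indicate a mismatch worth double-checking.

```python
# Hypothetical SelectKpiValues query (not part of this patch); field names are
# inferred from the servicer's usage and may not match the .proto exactly.
from common.proto.kpi_value_api_pb2 import KpiValueFilter
from kpi_value_api.client.KpiValueApiClient import KpiValueApiClient

request = KpiValueFilter()
request.kpi_id.add().kpi_id.uuid = '725ce3ad-ac67-4373-bd35-8cd9d6a86e09'  # an existing KPI
request.start_timestamp.append('2024-01-01 00:00:00')  # parse_datetime-compatible strings (assumption)
request.end_timestamp.append('2024-01-01 01:00:00')

client = KpiValueApiClient()
kpi_value_list = client.SelectKpiValues(request)
client.close()
```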
+ + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.service.GenericGrpcService import GenericGrpcService +from .KpiValueApiServiceServicerImpl import KpiValueApiServiceServicerImpl +from common.proto.kpi_value_api_pb2_grpc import add_KpiValueAPIServiceServicer_to_server + + +class KpiValueApiService(GenericGrpcService): + def __init__(self, cls_name : str = __name__ ) -> None: + port = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI) + super().__init__(port, cls_name=cls_name) + self.kpiValueApiService_servicer = KpiValueApiServiceServicerImpl() + + def install_servicers(self): + add_KpiValueAPIServiceServicer_to_server(self.kpiValueApiService_servicer, self.server) diff --git a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..4ea978fafc8d7454d41f64182d553d030215113a --- /dev/null +++ b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py @@ -0,0 +1,160 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, grpc, json +from typing import Dict +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic + +from common.proto.context_pb2 import Empty +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId +from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceServicer +from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter, KpiValue, KpiValueType + +from confluent_kafka import Producer as KafkaProducer + +from prometheus_api_client import PrometheusConnect +from prometheus_api_client.utils import parse_datetime + +from kpi_manager.client.KpiManagerClient import KpiManagerClient + +LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('KpiValueAPI', 'NBIgRPC') +PROM_URL = "http://prometheus-k8s.monitoring.svc.cluster.local:9090" # TODO: updated with the env variables + +class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer): + def __init__(self): + LOGGER.debug('Init KpiValueApiService') + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def StoreKpiValues(self, request: KpiValueList, grpc_context: grpc.ServicerContext + ) -> Empty: + LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request)) + + producer = self.kafka_producer + for kpi_value in request.kpi_value_list: + kpi_value_to_produce : Dict = { + "kpi_uuid" : kpi_value.kpi_id.kpi_id.uuid, + "timestamp" : kpi_value.timestamp.timestamp, + "kpi_value_type" : self.ExtractKpiValueByType(kpi_value.kpi_value_type) + } + LOGGER.debug('KPI to produce is {:}'.format(kpi_value_to_produce)) + msg_key = 
"gRPC-kpivalueapi" # str(__class__.__name__) can be used + + producer.produce( + KafkaTopic.VALUE.value, + key = msg_key, + value = json.dumps(kpi_value_to_produce), + callback = self.delivery_callback + ) + producer.flush() + return Empty() + + def ExtractKpiValueByType(self, value): + attributes = [ 'floatVal' , 'int32Val' , 'uint32Val','int64Val', + 'uint64Val', 'stringVal', 'boolVal'] + for attr in attributes: + try: + return getattr(value, attr) + except (ValueError, TypeError, AttributeError): + continue + return None + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def SelectKpiValues(self, request: KpiValueFilter, grpc_context: grpc.ServicerContext + ) -> KpiValueList: + LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request)) + response = KpiValueList() + + kpi_manager_client = KpiManagerClient() + prom_connect = PrometheusConnect(url=PROM_URL) + + metrics = [self.GetKpiSampleType(kpi, kpi_manager_client) for kpi in request.kpi_id] + start_timestamps = [parse_datetime(timestamp) for timestamp in request.start_timestamp] + end_timestamps = [parse_datetime(timestamp) for timestamp in request.end_timestamp] + + prom_response = [] + for start_time, end_time in zip(start_timestamps, end_timestamps): + for metric in metrics: + print(start_time, end_time, metric) + LOGGER.debug(">>> Query: {:}".format(metric)) + prom_response.append( + prom_connect.custom_query_range( + query = metric, # this is the metric name and label config + start_time = start_time, + end_time = end_time, + step = 30, # or any other step value (missing in gRPC Filter request) + ) + ) + + for single_resposne in prom_response: + # print ("{:}".format(single_resposne)) + for record in single_resposne: + # print("Record >>> kpi: {:} >>> time & values set: {:}".format(record['metric']['__name__'], record['values'])) + for value in record['values']: + # print("{:} - {:}".format(record['metric']['__name__'], value)) + kpi_value = KpiValue() + kpi_value.kpi_id.kpi_id = record['metric']['__name__'], + kpi_value.timestamp = value[0], + kpi_value.kpi_value_type = self.ConverValueToKpiValueType(value[1]) + response.kpi_value_list.append(kpi_value) + return response + + def GetKpiSampleType(self, kpi_value: str, kpi_manager_client): + print("--- START -----") + + kpi_id = KpiId() + kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid + # print("KpiId generated: {:}".format(kpi_id)) + + try: + kpi_descriptor_object = KpiDescriptor() + kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id) + # TODO: why kpi_descriptor_object recevies a KpiDescriptor type object not Empty type object??? + if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid: + LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) + print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) + return KpiSampleType.Name(kpi_descriptor_object.kpi_sample_type) # extract and return the name of KpiSampleType + else: + LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) + print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) + except Exception as e: + LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e)) + print ("Unable to get KpiDescriptor. 
Error: {:}".format(e)) + + def ConverValueToKpiValueType(self, value): + # Check if the value is an integer (int64) + try: + int_value = int(value) + return KpiValueType(int64Val=int_value) + except (ValueError, TypeError): + pass + # Check if the value is a float + try: + float_value = float(value) + return KpiValueType(floatVal=float_value) + except (ValueError, TypeError): + pass + # Check if the value is a boolean + if value.lower() in ['true', 'false']: + bool_value = value.lower() == 'true' + return KpiValueType(boolVal=bool_value) + # If none of the above, treat it as a string + return KpiValueType(stringVal=value) + + def delivery_callback(self, err, msg): + if err: LOGGER.debug('Message delivery failed: {:}'.format(err)) + else: LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) diff --git a/src/kpi_value_api/service/__init__.py b/src/kpi_value_api/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/kpi_value_api/service/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/kpi_value_api/service/__main__.py b/src/kpi_value_api/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0f265a48812c0ae475e4e079a09b83cdfb7c69e --- /dev/null +++ b/src/kpi_value_api/service/__main__.py @@ -0,0 +1,51 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import logging, signal, sys, threading
+from common.Settings import get_log_level
+from .KpiValueApiService import KpiValueApiService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.debug('Starting...')
+
+    grpc_service = KpiValueApiService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.debug('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.debug('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_value_api/tests/messages.py b/src/kpi_value_api/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8ad14bd44eebc1e9412cfd5ff2973e6018c95e9
--- /dev/null
+++ b/src/kpi_value_api/tests/messages.py
@@ -0,0 +1,36 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid, time
+from common.proto.kpi_value_api_pb2 import KpiValue, KpiValueList
+
+
+def create_kpi_value_list():
+    _create_kpi_value_list = KpiValueList()
+    # To run this experiment successfully, add an existing UUID of a KPI Descriptor from the KPI DB.
+    # This UUID is used to fetch the descriptor from the KPI DB. If the KPI ID does not exist,
+    # parts of the code will not execute.
+    EXISTING_KPI_IDs = ["725ce3ad-ac67-4373-bd35-8cd9d6a86e09",
+                        str(uuid.uuid4()),
+                        str(uuid.uuid4())]
+
+    for kpi_id_uuid in EXISTING_KPI_IDs:
+        kpi_value_object = KpiValue()
+        kpi_value_object.kpi_id.kpi_id.uuid      = kpi_id_uuid
+        kpi_value_object.timestamp.timestamp     = float(time.time())
+        kpi_value_object.kpi_value_type.floatVal = 100
+
+        _create_kpi_value_list.kpi_value_list.append(kpi_value_object)
+
+    return _create_kpi_value_list
diff --git a/src/kpi_value_api/tests/test_kpi_value_api.py b/src/kpi_value_api/tests/test_kpi_value_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..307b5cdad4e6503a774e308f669fc44762f84bf1
--- /dev/null
+++ b/src/kpi_value_api/tests/test_kpi_value_api.py
@@ -0,0 +1,84 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, logging, pytest
+from common.proto.context_pb2 import Empty
+from common.Constants import ServiceNameEnum
+from common.tools.kafka.Variables import KafkaTopic
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from kpi_value_api.service.KpiValueApiService import KpiValueApiService
+from kpi_value_api.client.KpiValueApiClient import KpiValueApiClient
+from kpi_value_api.tests.messages import create_kpi_value_list
+
+
+LOCAL_HOST = '127.0.0.1'
+KPIVALUEAPI_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI) # type: ignore
+os.environ[get_env_var_name(ServiceNameEnum.KPIVALUEAPI, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.KPIVALUEAPI, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIVALUEAPI_SERVICE_PORT)
+LOGGER = logging.getLogger(__name__)
+
+# This fixture will be requested by test cases and lasts during the testing session
+@pytest.fixture(scope='session')
+def kpi_value_api_service():
+    LOGGER.info('Initializing KpiValueApiService...')
+    _service = KpiValueApiService()
+    _service.start()
+
+    # yield the server; when the test finishes, execution will resume to stop it
+    LOGGER.info('Yielding KpiValueApiService...')
+    yield _service
+
+    LOGGER.info('Terminating KpiValueApiService...')
+    _service.stop()
+
+    LOGGER.info('Terminated KpiValueApiService...')
+
+# This fixture will be requested by test cases and lasts during the testing session.
+# The client requires the server, so the client fixture has the server as a dependency.
+@pytest.fixture(scope='session')
+def kpi_value_api_client(kpi_value_api_service : KpiValueApiService):
+    LOGGER.info('Initializing KpiValueApiClient...')
+    _client = KpiValueApiClient()
+
+    # yield the client; when the test finishes, execution will resume to close it
+    LOGGER.info('Yielding KpiValueApiClient...')
+    yield _client
+
+    LOGGER.info('Closing KpiValueApiClient...')
+    _client.close()
+
+    LOGGER.info('Closed KpiValueApiClient...')
+
+##################################################
+# Prepare Environment, should be the first test
+##################################################
+
+# To be added here
+
+###########################
+# Tests Implementation of Kpi Value Api
+###########################
+
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+def test_store_kpi_values(kpi_value_api_client):
+    LOGGER.debug(" >>> test_store_kpi_values: START <<< ")
+    response = kpi_value_api_client.StoreKpiValues(create_kpi_value_list())
+    assert isinstance(response, Empty)
diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9a2f9fd47e435b26e2e3a335bd9b95da58a0517f
--- /dev/null
+++ b/src/kpi_value_writer/.gitlab-ci.yml
@@ -0,0 +1,131 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build kpi-value-writer: + variables: + IMAGE_NAME: 'kpi_value_writer' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile . + - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + +# Apply unit test to the component +unit_test kpi-value-writer: + variables: + IMAGE_NAME: 'kpi_value_writer' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build kpi-value-writer + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME -d -p 30030:30030 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - sleep 5 + - docker ps -a + - docker logs $IMAGE_NAME + - > + docker exec -i $IMAGE_NAME bash -c + "coverage run -m pytest --log-level=INFO --verbose 
--junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py" + - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + - docker rm -f $IMAGE_NAME + - docker rm -f zookeeper + - docker rm -f kafka + - docker network rm teraflowbridge + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + - src/$IMAGE_NAME/tests/Dockerfile + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml + +## Deployment of the service in Kubernetes Cluster +#deploy context: +# variables: +# IMAGE_NAME: 'context' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test context +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/kpi_value_writer/Dockerfile b/src/kpi_value_writer/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..70f41128bd8c982f604a3424d2096c918ead080e --- /dev/null +++ b/src/kpi_value_writer/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
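+
+# NOTE: the steps below follow the layout shared by the TFS Python components:
+# a slim Python base with the gRPC health probe, pip-tools compiled common and
+# component requirements, generated proto stubs, and finally the component
+# sources (plus the kpi_manager client this component depends on).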
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_value_writer
+WORKDIR /var/teraflow/kpi_value_writer
+COPY src/kpi_value_writer/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_value_writer/. kpi_value_writer/
+COPY src/kpi_manager/__init__.py kpi_manager/__init__.py
+COPY src/kpi_manager/client/. kpi_manager/client/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_value_writer.service"]
diff --git a/src/kpi_value_writer/README.md b/src/kpi_value_writer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c45a0e39534fae9efef4174d5ca5be7047845c48
--- /dev/null
+++ b/src/kpi_value_writer/README.md
@@ -0,0 +1,17 @@
+# How to locally run and test the KPI Value Writer micro-service
+
+### Pre-requisites
+Ensure the following requirements are met before executing the KPI Value Writer service:
+
+1. The KPI Manager and KPI Value API services are running, and Apache Kafka is running.
+
+2. A virtual environment exists with all the required packages listed in the ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/requirements.in) file installed successfully.
+
+### Messages format templates
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_messages.py) Python file contains the templates to create gRPC messages.
+
+### Unit test file
+The ["KPI Value Writer test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_kpi_value_writer.py) Python file lists the various tests conducted to validate functionality.
+
+### Flow of execution
+1. Call the `RunKafkaConsumer` method from the `KpiValueWriter` class to start consuming the `KPI Value` generated by the `KPI Value API` or `Telemetry`. For every valid `KPI Value` consumed from Kafka, it invokes the `MetricWriterToPrometheus` class to prepare and push the metric to the Prometheus DB.
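+
+### Running the unit tests (example)
+A minimal local sketch, assuming the virtual environment is active, the tests are
+run from the repository root, and a Kafka broker is reachable at the address in
+`KFK_SERVER_ADDRESS` (all assumptions; adjust to your setup):
+```bash
+export PYTHONPATH=./src                       # assumed: run from the repository root
+export KFK_SERVER_ADDRESS="127.0.0.1:9092"    # assumed: local Kafka broker
+pytest --log-level=INFO --verbose src/kpi_value_writer/tests/test_kpi_value_writer.py
+```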
diff --git a/src/kpi_value_writer/__init__.py b/src/kpi_value_writer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_writer/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_writer/requirements.in b/src/kpi_value_writer/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..7e4694109dc4e1d31b86abfc03162494faafcdaf
--- /dev/null
+++ b/src/kpi_value_writer/requirements.in
@@ -0,0 +1,16 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+confluent-kafka==2.3.*
+requests==2.27.*
diff --git a/src/kpi_value_writer/service/KpiValueWriter.py b/src/kpi_value_writer/service/KpiValueWriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b258a1424cc44be4dcb9134ee913c707cc44bfa
--- /dev/null
+++ b/src/kpi_value_writer/service/KpiValueWriter.py
@@ -0,0 +1,98 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
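+
+# Consumes KPI values from the Kafka VALUE topic, resolves each one to its
+# KpiDescriptor through the KPI Manager, and hands the merged result to
+# MetricWriterToPrometheus for exposure to the Prometheus server.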
+
+import json
+import logging
+import threading
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
+from common.Settings import get_service_port_grpc
+from common.Constants import ServiceNameEnum
+from common.tools.service.GenericGrpcService import GenericGrpcService
+
+
+from confluent_kafka import KafkaError
+from confluent_kafka import Consumer as KafkaConsumer
+
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+# -- test import --
+# from kpi_value_writer.tests.test_messages import create_kpi_descriptor_request
+from .MetricWriterToPrometheus import MetricWriterToPrometheus
+
+
+LOGGER           = logging.getLogger(__name__)
+ACTIVE_CONSUMERS = []
+
+class KpiValueWriter(GenericGrpcService):
+    def __init__(self, cls_name : str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.KPIVALUEWRITER)
+        super().__init__(port, cls_name=cls_name)
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                             'group.id'          : 'KpiValueWriter',
+                                             'auto.offset.reset' : 'latest'})
+
+    def RunKafkaConsumer(self):
+        thread = threading.Thread(target=self.KafkaKpiConsumer, args=())
+        ACTIVE_CONSUMERS.append(thread)
+        thread.start()
+
+    def KafkaKpiConsumer(self):
+        kpi_manager_client = KpiManagerClient()
+        metric_writer      = MetricWriterToPrometheus()
+
+        consumer = self.kafka_consumer
+        consumer.subscribe([KafkaTopic.VALUE.value])
+        LOGGER.debug("Kafka Consumer started listening on topic: {:}".format(KafkaTopic.VALUE.value))
+        print("Kafka Consumer started listening on topic: {:}".format(KafkaTopic.VALUE.value))
+        while True:
+            raw_kpi = consumer.poll(1.0)
+            if raw_kpi is None:
+                continue
+            elif raw_kpi.error():
+                if raw_kpi.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(raw_kpi.error()))
+                    continue
+            try:
+                kpi_value = json.loads(raw_kpi.value().decode('utf-8'))
+                LOGGER.info("Received KPI : {:}".format(kpi_value))
+                print("Received KPI : {:}".format(kpi_value))
+                self.get_kpi_descriptor(kpi_value, kpi_manager_client, metric_writer)
+            except Exception as e:
+                print("Error detail: {:}".format(e))
+                continue
+
+    def get_kpi_descriptor(self, kpi_value, kpi_manager_client, metric_writer):
+        print("--- START -----")
+
+        kpi_id = KpiId()
+        kpi_id.kpi_id.uuid = kpi_value['kpi_uuid']
+        print("KpiId generated: {:}".format(kpi_id))
+        # print("Kpi manager client created: {:}".format(kpi_manager_client))
+        try:
+            kpi_descriptor_object = KpiDescriptor()
+            kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id)
+            # TODO: why does kpi_descriptor_object receive a KpiDescriptor type object, not an Empty type object???
+            if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid:
+                LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                metric_writer.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value)
+            else:
+                LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
+                print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
+        except Exception as e:
+            LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e))
+            print("Unable to get KpiDescriptor. Error: {:}".format(e))
Error: {:}".format(e)) diff --git a/src/kpi_value_writer/service/MetricWriterToPrometheus.py b/src/kpi_value_writer/service/MetricWriterToPrometheus.py new file mode 100644 index 0000000000000000000000000000000000000000..85e618a4b5b330cb83cf255652e7be8dff2dabd3 --- /dev/null +++ b/src/kpi_value_writer/service/MetricWriterToPrometheus.py @@ -0,0 +1,87 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# read Kafka stream from Kafka topic + +import logging +from typing import Dict +from prometheus_client import Gauge +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +from common.proto.kpi_value_api_pb2 import KpiValue +from common.proto.kpi_manager_pb2 import KpiDescriptor + +LOGGER = logging.getLogger(__name__) +PROM_METRICS = {} + +class MetricWriterToPrometheus: + ''' + This class exposes the *cooked KPI* on the endpoint to be scraped by the Prometheus server. + cooked KPI value = KpiDescriptor (gRPC message) + KpiValue (gRPC message) + ''' + def __init__(self): + pass + + def merge_kpi_descriptor_and_kpi_value(self, kpi_descriptor, kpi_value): + # Creating a dictionary from the kpi_descriptor's attributes + cooked_kpi = { + 'kpi_id' : kpi_descriptor.kpi_id.kpi_id.uuid, + 'kpi_description': kpi_descriptor.kpi_description, + 'kpi_sample_type': KpiSampleType.Name(kpi_descriptor.kpi_sample_type), + 'device_id' : kpi_descriptor.device_id.device_uuid.uuid, + 'endpoint_id' : kpi_descriptor.endpoint_id.endpoint_uuid.uuid, + 'service_id' : kpi_descriptor.service_id.service_uuid.uuid, + 'slice_id' : kpi_descriptor.slice_id.slice_uuid.uuid, + 'connection_id' : kpi_descriptor.connection_id.connection_uuid.uuid, + 'link_id' : kpi_descriptor.link_id.link_uuid.uuid, + 'time_stamp' : kpi_value['timestamp'], + 'kpi_value' : kpi_value['kpi_value_type'] + } + # LOGGER.debug("Cooked Kpi: {:}".format(cooked_kpi)) + return cooked_kpi + + def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: Dict): + # merge both gRPC messages into single varible. 
+
+    def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: Dict):
+        # merge both gRPC messages into a single variable.
+        cooked_kpi = self.merge_kpi_descriptor_and_kpi_value(kpi_descriptor, kpi_value)
+        tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'}
+        metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude]   # These values will be used as metric tags
+        metric_name = cooked_kpi['kpi_sample_type']
+        try:
+            if metric_name not in PROM_METRICS:     # Only register the metric when it doesn't exist
+                PROM_METRICS[metric_name] = Gauge (
+                    metric_name,
+                    cooked_kpi['kpi_description'],
+                    metric_tags
+                )
+                LOGGER.debug("Metric is created with labels: {:}".format(metric_tags))
+            PROM_METRICS[metric_name].labels(
+                kpi_id        = cooked_kpi['kpi_id'],
+                device_id     = cooked_kpi['device_id'],
+                endpoint_id   = cooked_kpi['endpoint_id'],
+                service_id    = cooked_kpi['service_id'],
+                slice_id      = cooked_kpi['slice_id'],
+                connection_id = cooked_kpi['connection_id'],
+                link_id       = cooked_kpi['link_id'],
+                time_stamp    = cooked_kpi['time_stamp'],
+            ).set(cooked_kpi['kpi_value'])
+            LOGGER.debug("Metric pushed to the endpoint: {:}".format(PROM_METRICS[metric_name]))
+
+        except ValueError as e:
+            if 'Duplicated timeseries' in str(e):
+                LOGGER.debug("Metric {:} is already registered. Skipping.".format(metric_name))
+                print("Metric {:} is already registered. Skipping.".format(metric_name))
+            else:
+                LOGGER.error("Error while pushing metric: {}".format(e))
+                raise
+
diff --git a/src/kpi_value_writer/service/__init__.py b/src/kpi_value_writer/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_writer/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_writer/service/__main__.py b/src/kpi_value_writer/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..be9f8f29bfdb2397eedd0ce2821c5da8f778cfc4
--- /dev/null
+++ b/src/kpi_value_writer/service/__main__.py
@@ -0,0 +1,54 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
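+
+# Entry point of the KPI Value Writer: starts the gRPC service and exposes the
+# Prometheus scrape endpoint (plain HTTP, port 10808) until SIGINT/SIGTERM.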
+ +import logging, signal, sys, threading +from prometheus_client import start_http_server +from kpi_value_writer.service.KpiValueWriter import KpiValueWriter +from common.Settings import get_log_level + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level) + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.debug('Starting...') + + grpc_service = KpiValueWriter() + grpc_service.start() + + start_http_server(10808) + LOGGER.debug("Prometheus client is started on port 10808") + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.debug('Terminating...') + grpc_service.stop() + + LOGGER.debug('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/kpi_value_writer/tests/test_kpi_value_writer.py b/src/kpi_value_writer/tests/test_kpi_value_writer.py new file mode 100755 index 0000000000000000000000000000000000000000..b784fae5da713f9bd7cd7a1668f48b080f7a84fa --- /dev/null +++ b/src/kpi_value_writer/tests/test_kpi_value_writer.py @@ -0,0 +1,33 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from kpi_value_writer.service.KpiValueWriter import KpiValueWriter + +from common.tools.kafka.Variables import KafkaTopic + + + +LOGGER = logging.getLogger(__name__) + +# -------- Initial Test ---------------- +def test_validate_kafka_topics(): + LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") + response = KafkaTopic.create_all_topics() + assert isinstance(response, bool) + +def test_KafkaConsumer(): + LOGGER.debug(" --->>> test_kafka_consumer: START <<<--- ") + kpi_value_writer = KpiValueWriter() + kpi_value_writer.RunKafkaConsumer() diff --git a/src/kpi_value_writer/tests/test_messages.py b/src/kpi_value_writer/tests/test_messages.py new file mode 100755 index 0000000000000000000000000000000000000000..89a41fa08ad37b7d9b305bba6e7c445fea5cd18a --- /dev/null +++ b/src/kpi_value_writer/tests/test_messages.py @@ -0,0 +1,44 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
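+
+# Factory helpers producing randomized gRPC messages (KpiId, KpiDescriptor,
+# KpiValue) used by the KPI Value Writer unit tests.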
+ +import uuid, time +import random +from common.proto import kpi_manager_pb2 +from common.proto.kpi_value_api_pb2 import KpiValue +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +def create_kpi_id_request(): + _create_kpi_id = kpi_manager_pb2.KpiId() + _create_kpi_id.kpi_id.uuid = str(uuid.uuid4()) + return _create_kpi_id + +def create_kpi_descriptor_request(description: str = "Test Description"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_kpi_request.kpi_description = description + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED + _create_kpi_request.device_id.device_uuid.uuid = 'DEV4' + _create_kpi_request.service_id.service_uuid.uuid = 'SERV3' + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC3' + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END2' + _create_kpi_request.connection_id.connection_uuid.uuid = 'CON2' + _create_kpi_request.link_id.link_uuid.uuid = 'LNK2' + return _create_kpi_request + +def create_kpi_value_request(): + _create_kpi_value_request = KpiValue() + _create_kpi_value_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_kpi_value_request.timestamp.timestamp = time.time() + _create_kpi_value_request.kpi_value_type.floatVal = random.randint(10, 10000) + return _create_kpi_value_request diff --git a/src/kpi_value_writer/tests/test_metric_writer_to_prom.py b/src/kpi_value_writer/tests/test_metric_writer_to_prom.py new file mode 100644 index 0000000000000000000000000000000000000000..f60e96253ae8edb29eedcbe2d6e66aaeb450229c --- /dev/null +++ b/src/kpi_value_writer/tests/test_metric_writer_to_prom.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import threading +import logging +from kpi_value_writer.service.MetricWriterToPrometheus import MetricWriterToPrometheus +from kpi_value_writer.tests.test_messages import create_kpi_descriptor_request, create_kpi_value_request + +LOGGER = logging.getLogger(__name__) + +def test_metric_writer_to_prometheus(): + LOGGER.info(' >>> test_metric_writer_to_prometheus START <<< ') + metric_writer_obj = MetricWriterToPrometheus() + metric_writer_obj.create_and_expose_cooked_kpi( + create_kpi_descriptor_request(), + create_kpi_value_request() + ) diff --git a/src/l3_centralizedattackdetector/requirements.in b/src/l3_centralizedattackdetector/requirements.in index 34513101381471027dbdbab4d3a615e057acd92e..14808cba5d26a479095ff112b505febff095bdcd 100644 --- a/src/l3_centralizedattackdetector/requirements.in +++ b/src/l3_centralizedattackdetector/requirements.in @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-numpy==1.23.* +numpy<2.0.0 onnxruntime==1.12.* scikit-learn==1.1.* diff --git a/src/l3_distributedattackdetector/requirements.in b/src/l3_distributedattackdetector/requirements.in index 6deb8d906f733e25bfac07bbe82b536b4774f5bb..1d2fbafc26397ee41314686a202938d42c9a22c0 100644 --- a/src/l3_distributedattackdetector/requirements.in +++ b/src/l3_distributedattackdetector/requirements.in @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -numpy==1.23.* +numpy<2.0.0 asyncio==3.4.3 diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in index 8684cb22350416818c20e881993a62d4f10a2e9e..3b67c00ee6056de089cde8d9b7faeef05d75336a 100644 --- a/src/monitoring/requirements.in +++ b/src/monitoring/requirements.in @@ -18,7 +18,7 @@ APScheduler==3.10.1 #google-api-core #opencensus[stackdriver] #google-cloud-profiler -#numpy +numpy<2.0.0 #Jinja2==3.0.3 #ncclient==0.6.13 #p4runtime==1.3.0 diff --git a/src/nbi/requirements.in b/src/nbi/requirements.in index 78d941974c62e32251373a805056068608b0bda2..4c5460a8e2b3c05d994bbaba4bd2939e629db1e2 100644 --- a/src/nbi/requirements.in +++ b/src/nbi/requirements.in @@ -22,5 +22,6 @@ libyang==2.8.0 netaddr==0.9.0 pyang==2.6.0 git+https://github.com/robshakir/pyangbind.git +pydantic==2.6.3 requests==2.27.1 werkzeug==2.3.7 diff --git a/src/nbi/service/__main__.py b/src/nbi/service/__main__.py index 362b0116d6f0bdbc4d1fa2025c09ac23c828617f..58fbb9625addc43c6b62d06d7a9caa3f648203d5 100644 --- a/src/nbi/service/__main__.py +++ b/src/nbi/service/__main__.py @@ -18,13 +18,16 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables) + from .NbiService import NbiService from .rest_server.RestServer import RestServer from .rest_server.nbi_plugins.etsi_bwm import register_etsi_bwm_api +from .rest_server.nbi_plugins.ietf_hardware import register_ietf_hardware from .rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn from .rest_server.nbi_plugins.ietf_l3vpn import register_ietf_l3vpn from .rest_server.nbi_plugins.ietf_network import register_ietf_network from .rest_server.nbi_plugins.ietf_network_slice import register_ietf_nss +from .rest_server.nbi_plugins.ietf_acl import register_ietf_acl from .rest_server.nbi_plugins.tfs_api import register_tfs_api terminate = threading.Event() @@ -63,13 +66,23 @@ def main(): rest_server = RestServer() register_etsi_bwm_api(rest_server) + register_ietf_hardware(rest_server) register_ietf_l2vpn(rest_server) # Registering L2VPN entrypoint register_ietf_l3vpn(rest_server) # Registering L3VPN entrypoint register_ietf_network(rest_server) register_ietf_nss(rest_server) # Registering NSS entrypoint + register_ietf_acl(rest_server) register_tfs_api(rest_server) rest_server.start() + LOGGER.debug('Configured Resources:') + for resource in rest_server.api.resources: + LOGGER.debug(' - {:s}'.format(str(resource))) + + LOGGER.debug('Configured Rules:') + for rule in rest_server.app.url_map.iter_rules(): + LOGGER.debug(' - {:s}'.format(str(rule))) + # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=1.0): pass diff --git a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py index 4c6ad47bc210316908ed3e3676abbda6757cf615..7f9360e00f5891b6cac0ae5020bd4fbc5ab7d9c1 100644 --- 
a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py
+++ b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py
@@ -13,12 +13,15 @@
 # limitations under the License.
 
 import copy, deepmerge, json, logging
+from typing import Dict
+from flask_restful import Resource, request
+from werkzeug.exceptions import UnsupportedMediaType
 from common.Constants import DEFAULT_CONTEXT_NAME
 from context.client.ContextClient import ContextClient
-from flask_restful import Resource, request
 from service.client.ServiceClient import ServiceClient
 from .Tools import (
-    format_grpc_to_json, grpc_context_id, grpc_service_id, bwInfo_2_service, service_2_bwInfo)
+    format_grpc_to_json, grpc_context_id, grpc_service_id, bwInfo_2_service, service_2_bwInfo
+)
 
 LOGGER = logging.getLogger(__name__)
 
@@ -37,15 +40,20 @@ class BwInfo(_Resource):
         return bw_allocations
 
     def post(self):
-        bwinfo = request.get_json()
-        service = bwInfo_2_service(self.client, bwinfo)
+        if not request.is_json:
+            raise UnsupportedMediaType('JSON payload is required')
+        request_data: Dict = request.get_json()
+        service = bwInfo_2_service(self.client, request_data)
         stripped_service = copy.deepcopy(service)
 
         stripped_service.ClearField('service_endpoint_ids')
         stripped_service.ClearField('service_constraints')
         stripped_service.ClearField('service_config')
 
-        response = format_grpc_to_json(self.service_client.CreateService(stripped_service))
-        response = format_grpc_to_json(self.service_client.UpdateService(service))
+        try:
+            response = format_grpc_to_json(self.service_client.CreateService(stripped_service))
+            response = format_grpc_to_json(self.service_client.UpdateService(service))
+        except Exception as e: # pylint: disable=broad-except
+            return {'error': str(e)}, 500
 
         return response
diff --git a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
index 59436708cca2fcf7ff0ff65aa4977e2ccfaeda95..55efa48b12b61cb44c23fc2995679afe38351368 100644
--- a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
+++ b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
@@ -12,20 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import json
-import logging
-import time
+import json, logging, re, time
 from decimal import ROUND_HALF_EVEN, Decimal
 from flask.json import jsonify
 from common.proto.context_pb2 import (
-    ContextId, Empty, EndPointId, ServiceId, ServiceTypeEnum, Service, Constraint, Constraint_SLA_Capacity,
-    ConfigRule, ConfigRule_Custom, ConfigActionEnum)
+    ContextId, Empty, EndPointId, ServiceId, ServiceStatusEnum, ServiceTypeEnum,
+    Service, Constraint, Constraint_SLA_Capacity, ConfigRule, ConfigRule_Custom,
+    ConfigActionEnum
+)
+from common.tools.grpc.ConfigRules import update_config_rule_custom
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Service import json_service_id
 
 LOGGER = logging.getLogger(__name__)
 
+ENDPOINT_SETTINGS_KEY = '/device[{:s}]/endpoint[{:s}]/vlan[{:d}]/settings'
+DEVICE_SETTINGS_KEY   = '/device[{:s}]/settings'
+RE_CONFIG_RULE_IF_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$')
+MEC_CONSIDERED_FIELDS = ['requestType', 'sessionFilter', 'fixedAllocation', 'allocationDirection', 'fixedBWPriority']
+ALLOCATION_DIRECTION_DESCRIPTIONS = {
+    '00' : 'Downlink (towards the UE)',
+    '01' : 'Uplink (towards the application/session)',
+    '10' : 'Symmetrical'}
+VLAN_TAG = 0
+PREFIX_LENGTH = 24
+BGP_AS = 65000
+POLICY_AZ = 'srv_{:d}_a'.format(VLAN_TAG)
+POLICY_ZA = 'srv_{:d}_b'.format(VLAN_TAG)
+BGP_NEIGHBOR_IP_A = '192.168.150.1'
+BGP_NEIGHBOR_IP_Z = '192.168.150.2'
+ROUTER_ID_A = '200.1.1.1'
+ROUTER_ID_Z = '200.1.1.2'
+ROUTE_DISTINGUISHER = '{:5d}:{:03d}'.format(BGP_AS, VLAN_TAG)
 
 def service_2_bwInfo(service: Service) -> dict:
     response = {}
@@ -40,12 +59,19 @@ def service_2_bwInfo(service: Service) -> dict:
             break
 
     for config_rule in service.service_config.config_rules:
+        if config_rule.custom.resource_key != '/request':
+            continue
+        resource_value_json = json.loads(config_rule.custom.resource_value)
         for key in ['allocationDirection', 'fixedBWPriority', 'requestType', 'sourceIp', 'sourcePort', 'dstPort', 'protocol', 'sessionFilter']:
-            if config_rule.custom.resource_key == key:
-                if key != 'sessionFilter':
-                    response[key] = config_rule.custom.resource_value
-                else:
-                    response[key] = json.loads(config_rule.custom.resource_value)
+            if key not in resource_value_json:
+                continue
+
+            if key == 'sessionFilter':
+                response[key] = [resource_value_json[key]]
+            elif key == 'requestType':
+                response[key] = str(resource_value_json[key])
+            else:
+                response[key] = resource_value_json[key]
 
     unixtime = time.time()
     response['timeStamp'] = { # Time stamp to indicate when the corresponding information elements are sent
@@ -55,47 +81,108 @@
     return response
 
-def bwInfo_2_service(client, bwInfo: dict) -> Service:
+def bwInfo_2_service(client, bw_info: dict) -> Service:
+    # Keep only the first entry of the sessionFilter list
+    if 'sessionFilter' in bw_info:
+        bw_info['sessionFilter'] = bw_info['sessionFilter'][0] # Discard other items in sessionFilter field
+
     service = Service()
-    for key in ['allocationDirection', 'fixedBWPriority', 'requestType', 'timeStamp', 'sessionFilter']:
-        if key not in bwInfo:
-            continue
-        config_rule = ConfigRule()
-        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
-        config_rule_custom = ConfigRule_Custom()
-        config_rule_custom.resource_key = key
-        if key != 'sessionFilter':
-            config_rule_custom.resource_value = str(bwInfo[key])
-        else:
-            config_rule_custom.resource_value = json.dumps(bwInfo[key])
-        config_rule.custom.CopyFrom(config_rule_custom)
-        service.service_config.config_rules.append(config_rule)
-
-    if 'sessionFilter' in bwInfo:
-        a_ip = bwInfo['sessionFilter'][0]['sourceIp']
-        z_ip = bwInfo['sessionFilter'][0]['dstAddress']
+
+    service_config_rules = service.service_config.config_rules
+
+
+    request_cr_key = '/request'
+    request_cr_value = {k:bw_info[k] for k in MEC_CONSIDERED_FIELDS}
+
+    config_rule = ConfigRule()
+    config_rule.action = ConfigActionEnum.CONFIGACTION_SET
+    config_rule_custom = ConfigRule_Custom()
+    config_rule_custom.resource_key = request_cr_key
+    config_rule_custom.resource_value = json.dumps(request_cr_value)
+    config_rule.custom.CopyFrom(config_rule_custom)
+    service_config_rules.append(config_rule)
+
+    if 'sessionFilter' in bw_info:
+        a_ip = bw_info['sessionFilter']['sourceIp']
+        z_ip = bw_info['sessionFilter']['dstAddress']
 
         devices = client.ListDevices(Empty()).devices
+        ip_interface_name_dict = {}
         for device in devices:
+            device_endpoint_uuids = {ep.name:ep.endpoint_id.endpoint_uuid.uuid for ep in device.device_endpoints}
+            skip_device = True
             for cr in device.device_config.config_rules:
-                if cr.WhichOneof('config_rule') == 'custom' and cr.custom.resource_key == '_connect/settings':
-                    for ep in json.loads(cr.custom.resource_value)['endpoints']:
-                        if 'ip' in ep and (ep['ip'] == a_ip or ep['ip'] == z_ip):
-                            ep_id = EndPointId()
-                            ep_id.endpoint_uuid.uuid = ep['uuid']
-                            ep_id.device_id.device_uuid.uuid = device.device_id.device_uuid.uuid
-                            service.service_endpoint_ids.append(ep_id)
-
+                if cr.WhichOneof('config_rule') != 'custom':
+                    continue
+                match_subif = RE_CONFIG_RULE_IF_SUBIF.match(cr.custom.resource_key)
+                if not match_subif:
+                    continue
+                address_ip = json.loads(cr.custom.resource_value).get('address_ip')
+                short_port_name = match_subif.group(1)
+                ip_interface_name_dict[address_ip] = short_port_name
+                if address_ip not in [a_ip, z_ip]:
+                    continue
+                port_name = 'PORT-' + short_port_name # `PORT-` added as prefix
+                ep_id = EndPointId()
+                ep_id.endpoint_uuid.uuid = device_endpoint_uuids[port_name]
+                ep_id.device_id.device_uuid.uuid = device.device_id.device_uuid.uuid
+                service.service_endpoint_ids.append(ep_id)
+                # add interface config rules
+                endpoint_settings_key = ENDPOINT_SETTINGS_KEY.format(device.name, port_name, VLAN_TAG)
+                if address_ip == a_ip:
+                    router_id = ROUTER_ID_A
+                    policy_az = POLICY_AZ
+                    policy_za = POLICY_ZA
+                    neighbor_bgp_interface_address_ip = BGP_NEIGHBOR_IP_Z
+                    self_bgp_interface_address_ip     = BGP_NEIGHBOR_IP_A
+                else:
+                    router_id = ROUTER_ID_Z
+                    policy_az = POLICY_ZA
+                    policy_za = POLICY_AZ
+                    neighbor_bgp_interface_address_ip = BGP_NEIGHBOR_IP_A
+                    self_bgp_interface_address_ip     = BGP_NEIGHBOR_IP_Z
+                endpoint_field_updates = {
+                    'address_ip': (address_ip, True),
+                    'address_prefix' : (PREFIX_LENGTH, True),
+                    'sub_interface_index': (0, True),
+                }
+                LOGGER.debug(f'BEFORE UPDATE -> device.device_config.config_rules: {service_config_rules}')
+                update_config_rule_custom(service_config_rules, endpoint_settings_key, endpoint_field_updates)
+                LOGGER.debug(f'AFTER UPDATE -> device.device_config.config_rules: {service_config_rules}')
+                skip_device = False
+            if skip_device:
+                continue
+            device_field_updates = {
+                'bgp_as':(BGP_AS, True),
+                'route_distinguisher': (ROUTE_DISTINGUISHER, True),
+                'router_id': (router_id, True),
+                'policy_AZ': (policy_az, True),
+                'policy_ZA': (policy_za, True),
+                'neighbor_bgp_interface_address_ip': (neighbor_bgp_interface_address_ip, True),
+                'self_bgp_interface_name': 
(ip_interface_name_dict[self_bgp_interface_address_ip], True), + 'self_bgp_interface_address_ip': (self_bgp_interface_address_ip, True), + 'bgp_interface_address_prefix': (PREFIX_LENGTH, True) + } + device_settings_key = DEVICE_SETTINGS_KEY.format(device.name) + LOGGER.debug(f'BEFORE UPDATE -> device.device_config.config_rules: {service_config_rules}') + update_config_rule_custom(service_config_rules, device_settings_key, device_field_updates) + LOGGER.debug(f'AFTER UPDATE -> device.device_config.config_rules: {service_config_rules}') + + settings_cr_key = '/settings' + settings_cr_value = {} + update_config_rule_custom(service_config_rules, settings_cr_key, settings_cr_value) + + service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED service.service_type = ServiceTypeEnum.SERVICETYPE_L3NM - if 'appInsId' in bwInfo: - service.service_id.service_uuid.uuid = bwInfo['appInsId'] + if 'appInsId' in bw_info: + service.service_id.service_uuid.uuid = bw_info['appInsId'] service.service_id.context_id.context_uuid.uuid = 'admin' - service.name = bwInfo['appInsId'] + service.name = bw_info['appInsId'] - if 'fixedAllocation' in bwInfo: + if 'fixedAllocation' in bw_info: capacity = Constraint_SLA_Capacity() - capacity.capacity_gbps = float(bwInfo['fixedAllocation']) / 1.e9 + capacity.capacity_gbps = float(bw_info['fixedAllocation']) / 1.e9 constraint = Constraint() constraint.sla_capacity.CopyFrom(capacity) service.service_constraints.append(constraint) diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acl.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acl.py new file mode 100644 index 0000000000000000000000000000000000000000..3e2f1389e6786a5cef322ecfaf64c12112409619 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acl.py @@ -0,0 +1,75 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
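+
+# REST resource handling a single named ACL on a device: GET rebuilds the IETF
+# ACL JSON from the matching device config rule; DELETE re-submits the matching
+# config rules with CONFIGACTION_DELETE through the Device service.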
+
+import json, logging, re
+from flask_restful import Resource
+from werkzeug.exceptions import NotFound
+from common.proto.context_pb2 import ConfigActionEnum, ConfigRule
+from common.tools.context_queries.Device import get_device
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from nbi.service.rest_server.nbi_plugins.tools.Authentication import HTTP_AUTH
+from .ietf_acl_parser import ietf_acl_from_config_rule_resource_value
+
+LOGGER = logging.getLogger(__name__)
+
+ACL_CONFIG_RULE_KEY = r'\/device\[.+\]\/endpoint\[(.+)\]/acl_ruleset\[{}\]'
+
+class Acl(Resource):
+    @HTTP_AUTH.login_required
+    def get(self, device_uuid : str, acl_name : str):
+        LOGGER.debug('GET device_uuid={:s}, acl_name={:s}'.format(str(device_uuid), str(acl_name)))
+        RE_ACL_CONFIG_RULE_KEY = re.compile(ACL_CONFIG_RULE_KEY.format(acl_name))
+
+        context_client = ContextClient()
+        device = get_device(context_client, device_uuid, rw_copy=False, include_config_rules=True)
+        if device is None: raise NotFound('Device({:s}) not found'.format(str(device_uuid)))
+
+        for config_rule in device.device_config.config_rules:
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            ep_uuid_match = RE_ACL_CONFIG_RULE_KEY.match(config_rule.custom.resource_key)
+            if ep_uuid_match is None: continue
+            resource_value_dict = json.loads(config_rule.custom.resource_value)
+            return ietf_acl_from_config_rule_resource_value(resource_value_dict)
+
+        raise NotFound('Acl({:s}) not found in Device({:s})'.format(str(acl_name), str(device_uuid)))
+
+    @HTTP_AUTH.login_required
+    def delete(self, device_uuid : str, acl_name : str):
+        LOGGER.debug('DELETE device_uuid={:s}, acl_name={:s}'.format(str(device_uuid), str(acl_name)))
+        RE_ACL_CONFIG_RULE_KEY = re.compile(ACL_CONFIG_RULE_KEY.format(acl_name))
+
+        context_client = ContextClient()
+        device = get_device(context_client, device_uuid, rw_copy=True, include_config_rules=True)
+        if device is None: raise NotFound('Device({:s}) not found'.format(str(device_uuid)))
+
+        delete_config_rules = list()
+        for config_rule in device.device_config.config_rules:
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            ep_uuid_match = RE_ACL_CONFIG_RULE_KEY.match(config_rule.custom.resource_key)
+            if ep_uuid_match is None: continue
+
+            _config_rule = ConfigRule()
+            _config_rule.CopyFrom(config_rule)
+            _config_rule.action = ConfigActionEnum.CONFIGACTION_DELETE
+            delete_config_rules.append(_config_rule)
+
+        if len(delete_config_rules) == 0:
+            raise NotFound('Acl({:s}) not found in Device({:s})'.format(str(acl_name), str(device_uuid)))
+
+        device_client = DeviceClient()
+        del device.device_config.config_rules[:]
+        device.device_config.config_rules.extend(delete_config_rules)
+        device_client.ConfigureDevice(device)
+        return None
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acls.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acls.py
new file mode 100644
index 0000000000000000000000000000000000000000..1814abbb415cfbaee205ff7880fb299e70b5dba1
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acls.py
@@ -0,0 +1,131 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Dict, List, Set +from flask import jsonify, request +from flask_restful import Resource +from werkzeug.exceptions import BadRequest, NotFound, UnsupportedMediaType +from common.proto.context_pb2 import ConfigRule +from common.tools.context_queries.Device import get_device +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from nbi.service.rest_server.nbi_plugins.tools.Authentication import HTTP_AUTH +from .ietf_acl_parser import AclDirectionEnum, config_rule_from_ietf_acl +from .YangValidator import YangValidator + +LOGGER = logging.getLogger(__name__) + + +def compose_interface_direction_acl_rules( + device_name : str, interface_name : str, interface_data : Dict, + acl_direction : AclDirectionEnum, acl_name__to__acl_data : Dict[str, Dict] +) -> List[ConfigRule]: + acl_direction_name = acl_direction.value + acl_direction_title = str(acl_direction_name).title() + direction_data : Dict[str, Dict] = interface_data.get(acl_direction_name, {}) + acl_sets : Dict[str, Dict] = direction_data.get('acl-sets', {}) + acl_set_list : List[Dict] = acl_sets .get('acl-set', []) + acl_set_names : Set[str] = {acl_set['name'] for acl_set in acl_set_list} + + acl_config_rules : List[ConfigRule] = list() + for acl_set_name in acl_set_names: + acl_set = acl_name__to__acl_data.get(acl_set_name) + if acl_set is None: + MSG = 'Interface({:s})/{:s}/AclSet({:s}) not found' + raise NotFound(MSG.format( + str(interface_name), acl_direction_title, + str(acl_set_name) + )) + + acl_config_rule = config_rule_from_ietf_acl( + device_name, interface_name, acl_set + ) + MSG = 'Adding {:s} ACL Config Rule: {:s}' + LOGGER.info(MSG.format( + acl_direction_title, grpc_message_to_json_string(acl_config_rule) + )) + acl_config_rules.append(acl_config_rule) + + return acl_config_rules + +class Acls(Resource): + @HTTP_AUTH.login_required + def get(self): + return {} + + @HTTP_AUTH.login_required + def post(self, device_uuid : str): + if not request.is_json: + LOGGER.warning('POST device_uuid={:s}, body={:s}'.format(str(device_uuid), str(request.data))) + raise UnsupportedMediaType('JSON payload is required') + request_data : Dict = request.json + LOGGER.debug('POST device_uuid={:s}, body={:s}'.format(str(device_uuid), json.dumps(request_data))) + + context_client = ContextClient() + device = get_device( + context_client, device_uuid, rw_copy=True, include_config_rules=False, include_components=False + ) + if device is None: + raise NotFound('Device({:s}) not found'.format(str(device_uuid))) + + device_name = device.name + interface_names : Set[str] = set() + for endpoint in device.device_endpoints: + interface_names.add(endpoint.endpoint_id.endpoint_uuid.uuid) + interface_names.add(endpoint.name) + + yang_validator = YangValidator() + request_data = yang_validator.parse_to_dict(request_data, list(interface_names)) + yang_validator.destroy() + + acls : Dict = request_data.get('acls', {}) + acl_list : List = acls.get('acl', []) + acl_name__to__acl_data = { + 
acl['name'] : acl + for acl in acl_list + } + + if len(acl_name__to__acl_data) == 0: + raise BadRequest('No ACLs defined in the request') + + interface_list : List = acls.get('attachment-points', {}).get('interface', []) + interface_name__to__interface_data = { + interface['interface-id'] : interface + for interface in interface_list + } + + if len(interface_name__to__interface_data) == 0: + raise BadRequest('No interfaces defined in the request') + + for interface_name in interface_names: + interface_data = interface_name__to__interface_data.get(interface_name) + if interface_data is None: continue + + ingress_acl_config_rules = compose_interface_direction_acl_rules( + device_name, interface_name, interface_data, AclDirectionEnum.INGRESS, + acl_name__to__acl_data + ) + device.device_config.config_rules.extend(ingress_acl_config_rules) + + egress_acl_config_rules = compose_interface_direction_acl_rules( + device_name, interface_name, interface_data, AclDirectionEnum.EGRESS, + acl_name__to__acl_data + ) + device.device_config.config_rules.extend(egress_acl_config_rules) + + device_client = DeviceClient() + device_client.ConfigureDevice(device) + return jsonify({}) diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/YangValidator.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/YangValidator.py new file mode 100644 index 0000000000000000000000000000000000000000..56bf9b30c1bc2ab6a36a3d59519b544cd3c00ef3 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/YangValidator.py @@ -0,0 +1,111 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
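+
+# Editorial note (illustrative sketch, not part of the module API): the
+# ietf_acl NBI plugin in Acls.py above drives this validator roughly as:
+#
+#   yang_validator = YangValidator()
+#   request_data = yang_validator.parse_to_dict(request_json, interface_names)
+#   yang_validator.destroy()
+#
+# parse_to_dict() injects a synthetic ietf-interfaces tree (one entry per
+# known interface name) so the if:interface-ref leafrefs used by the ACL
+# attachment-points can be resolved, then validates the combined payload
+# against the YANG modules loaded below.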
+ +import copy, json, libyang, logging, os +from typing import Dict, List, Optional + +LOGGER = logging.getLogger(__name__) + +YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang') +YANG_MODULES = [ + 'ietf-yang-types', + 'ietf-interfaces', + 'iana-if-type', + 'ietf-access-control-list', +] + +class YangValidator: + def __init__(self) -> None: + self._yang_context = libyang.Context(YANG_DIR) + for module_name in YANG_MODULES: + LOGGER.info('Loading module: {:s}'.format(str(module_name))) + yang_module = self._yang_context.load_module(module_name) + yang_module.feature_enable_all() + + def parse_to_dict(self, message : Dict, interface_names : List[str]) -> Dict: + LOGGER.debug('[parse_to_dict] message={:s}'.format(json.dumps(message))) + LOGGER.debug('[parse_to_dict] interface_names={:s}'.format(json.dumps(interface_names))) + + # Inject synthetic interfaces for validation purposes + interfaces = self._yang_context.create_data_path('/ietf-interfaces:interfaces') + for if_index,interface_name in enumerate(interface_names): + if_path = 'interface[name="{:s}"]'.format(str(interface_name)) + interface = interfaces.create_path(if_path) + interface.create_path('if-index', if_index + 1) + interface.create_path('type', 'iana-if-type:ethernetCsmacd') + interface.create_path('admin-status', 'up') + interface.create_path('oper-status', 'up') + statistics = interface.create_path('statistics') + statistics.create_path('discontinuity-time', '2024-07-11T10:00:00.000000Z') + + extended_message = copy.deepcopy(message) + extended_message['ietf-interfaces:interfaces'] = interfaces.print_dict()['interfaces'] + LOGGER.debug('[parse_to_dict] extended_message={:s}'.format(json.dumps(extended_message))) + + dnode : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(extended_message), 'json', validate_present=True, strict=True + ) + if dnode is None: + LOGGER.error('[parse_to_dict] unable to parse message') + raise Exception('Unable to parse Message({:s})'.format(str(message))) + message_dict = dnode.print_dict() + LOGGER.debug('[parse_to_dict] message_dict={:s}'.format(json.dumps(message_dict))) + + dnode.free() + interfaces.free() + return message_dict + + def destroy(self) -> None: + self._yang_context.destroy() + self._yang_context = None + +def main() -> None: + import uuid # pylint: disable=import-outside-toplevel + logging.basicConfig(level=logging.DEBUG) + + interface_names = {'200', '500', str(uuid.uuid4()), str(uuid.uuid4())} + ACL_RULE = {"ietf-access-control-list:acls": { + "acl": [{ + "name": "sample-ipv4-acl", "type": "ipv4-acl-type", + "aces": {"ace": [{ + "name": "rule1", + "matches": { + "ipv4": { + "source-ipv4-network": "128.32.10.6/24", + "destination-ipv4-network": "172.10.33.0/24", + "dscp": 18 + }, + "tcp": { + "source-port": {"operator": "eq", "port": 1444}, + "destination-port": {"operator": "eq", "port": 1333}, + "flags": "syn" + } + }, + "actions": {"forwarding": "drop"} + }]} + }], + "attachment-points": {"interface": [{ + "interface-id": "200", + "ingress": {"acl-sets": {"acl-set": [{"name": "sample-ipv4-acl"}]}} + }] + }}} + + yang_validator = YangValidator() + request_data = yang_validator.parse_to_dict(ACL_RULE, list(interface_names)) + yang_validator.destroy() + + LOGGER.info('request_data = {:s}'.format(str(request_data))) + +if __name__ == '__main__': + main() diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/__init__.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..3538b24ba56b2a6011b76b3878c4bef690fe1fc8 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from flask_restful import Resource +from nbi.service.rest_server.RestServer import RestServer +from .Acl import Acl +from .Acls import Acls + +URL_PREFIX = '/restconf/data' + +def __add_resource(rest_server: RestServer, resource: Resource, *urls, **kwargs): + urls = [(URL_PREFIX + url) for url in urls] + rest_server.add_resource(resource, *urls, **kwargs) + +def register_ietf_acl(rest_server: RestServer): + __add_resource( + rest_server, + Acls, + '/device=<path:device_uuid>/ietf-access-control-list:acls', + ) + + __add_resource( + rest_server, + Acl, + '/device=<path:device_uuid>/ietf-access-control-list:acl=<path:acl_name>', + '/device=<path:device_uuid>/ietf-access-control-list:acl=<path:acl_name>/', + ) diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/ietf_acl_parser.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/ietf_acl_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..085d680d177d2f48d41c1160c3a70b6c7c4209cb --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/ietf_acl_parser.py @@ -0,0 +1,257 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
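+
+# Editorial note: this parser converts between the RFC 8519 IETF ACL JSON
+# representation and TFS ACL ConfigRules, in both directions:
+#   - config_rule_from_ietf_acl(): IETF ACL set dict -> ConfigRule (CONFIGACTION_SET)
+#   - ietf_acl_from_config_rule_resource_value(): stored ConfigRule resource
+#     value dict -> IETF ACL dict, rebuilt through the pydantic models below.
+# The pydantic models mirror the YANG structure; their serialization aliases
+# let model_dump(by_alias=True) emit the hyphenated IETF member names
+# (e.g. "source-ipv4-network", "acl-sets").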
+ +from enum import Enum +from typing import List, Dict +from pydantic import BaseModel, Field +from werkzeug.exceptions import NotImplemented +from common.proto.acl_pb2 import AclForwardActionEnum, AclRuleTypeEnum, AclEntry +from common.proto.context_pb2 import ConfigActionEnum, ConfigRule + +class AclDirectionEnum(Enum): + INGRESS = 'ingress' + EGRESS = 'egress' + +class Ipv4(BaseModel): + dscp: int = 0 + source_ipv4_network: str = Field(serialization_alias="source-ipv4-network", default="") + destination_ipv4_network: str = Field(serialization_alias="destination-ipv4-network", default="") + +class Port(BaseModel): + port: int = 0 + operator: str = "eq" + +class Tcp(BaseModel): + flags: str = "" + source_port: Port = Field(serialization_alias="source-port", default_factory=lambda: Port()) + destination_port: Port = Field(serialization_alias="destination-port", default_factory=lambda: Port()) + +class Matches(BaseModel): + ipv4: Ipv4 = Ipv4() + tcp: Tcp = Tcp() + +class Action(BaseModel): + forwarding: str = "" + +class Ace(BaseModel): + name: str = "custom_rule" + matches: Matches = Matches() + actions: Action = Action() + +class Aces(BaseModel): + ace: List[Ace] = [Ace()] + +class Acl(BaseModel): + name: str = "" + type: str = "" + aces: Aces = Aces() + +class Name(BaseModel): + name: str = "" + +class AclSet(BaseModel): + acl_set: List[Name] = Field(serialization_alias="acl-set", default=[Name()]) + +class AclSets(BaseModel): + acl_sets: AclSet = Field(serialization_alias="acl-sets", default=AclSet()) + +class Ingress(BaseModel): + ingress : AclSets = AclSets() + +class Egress(BaseModel): + egress : AclSets = AclSets() + +class Interface(BaseModel): + interface_id: str = Field(serialization_alias="interface-id", default="") + ingress : Ingress = Ingress() + egress : Egress = Egress() + +class Interfaces(BaseModel): + interface: List[Interface] = [Interface()] + +class AttachmentPoints(BaseModel): + attachment_points: Interfaces = Field(serialization_alias="attachment-points", default=Interfaces()) + +class Acls(BaseModel): + acl: List[Acl] = [Acl()] + attachment_points: AttachmentPoints = Field(serialization_alias="attachment-points", default=AttachmentPoints()) + +class IETF_ACL(BaseModel): + acls: Acls = Acls() + + +IETF_TFS_RULE_TYPE_MAPPING = { + "ipv4-acl-type": "ACLRULETYPE_IPV4", + "ipv6-acl-type": "ACLRULETYPE_IPV6", +} + +IETF_TFS_FORWARDING_ACTION_MAPPING = { + "accept": "ACLFORWARDINGACTION_ACCEPT", + "drop" : "ACLFORWARDINGACTION_DROP", +} + +TFS_IETF_RULE_TYPE_MAPPING = { + "ACLRULETYPE_IPV4": "ipv4-acl-type", + "ACLRULETYPE_IPV6": "ipv6-acl-type", +} + +TFS_IETF_FORWARDING_ACTION_MAPPING = { + "ACLFORWARDINGACTION_ACCEPT": "accept", + "ACLFORWARDINGACTION_DROP" : "drop", +} + +def config_rule_from_ietf_acl( + device_name : str, endpoint_name : str, acl_set_data : Dict +) -> ConfigRule: + acl_config_rule = ConfigRule() + acl_config_rule.action = ConfigActionEnum.CONFIGACTION_SET + acl_endpoint_id = acl_config_rule.acl.endpoint_id + acl_endpoint_id.device_id.device_uuid.uuid = device_name + acl_endpoint_id.endpoint_uuid.uuid = endpoint_name + + acl_name = acl_set_data['name'] + acl_type = acl_set_data['type'] + if acl_type.startswith('ietf-access-control-list:'): + acl_type = acl_type.replace('ietf-access-control-list:', '') + acl_type = getattr(AclRuleTypeEnum, IETF_TFS_RULE_TYPE_MAPPING[acl_type]) + + acl_rule_set = acl_config_rule.acl.rule_set + acl_rule_set.name = acl_name + acl_rule_set.type = acl_type + #acl_rule_set.description = ... 
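+
+    # Map each IETF Access Control Entry (ACE) onto a TFS AclEntry: the 1-based
+    # sequence_id preserves the user-defined ACE ordering, and the ipv4/tcp/udp
+    # match containers below are flattened into the AclEntry match fields
+    # (protocol is forced to 6/TCP or 17/UDP when an L4 container is present).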
+ + access_control_entry_list = acl_set_data.get('aces', {}).get('ace', []) + for sequence_id,ace in enumerate(access_control_entry_list): + ace_name = ace['name'] + ace_matches = ace.get('matches', {}) + ace_actions = ace.get('actions', {}) + + acl_entry = AclEntry() + acl_entry.sequence_id = sequence_id + 1 + #acl_entry.description = ... + + if 'ipv4' in ace_matches: + ipv4_data = ace_matches['ipv4'] + if 'source-ipv4-network' in ipv4_data: + acl_entry.match.src_address = ipv4_data['source-ipv4-network'] + if 'destination-ipv4-network' in ipv4_data: + acl_entry.match.dst_address = ipv4_data['destination-ipv4-network'] + if 'dscp' in ipv4_data: + acl_entry.match.dscp = ipv4_data['dscp'] + if 'protocol' in ipv4_data: + acl_entry.match.protocol = ipv4_data['protocol'] + + if 'tcp' in ace_matches: + # https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml + acl_entry.match.protocol = 6 + tcp_data = ace_matches['tcp'] + if 'source-port' in tcp_data: + tcp_src_port : Dict = tcp_data['source-port'] + tcp_src_port_op = tcp_src_port.get('operator', 'eq') + if tcp_src_port_op != 'eq': + MSG = 'Acl({:s})/Ace({:s})/Match/Tcp({:s}) operator not supported' + raise NotImplemented(MSG.format(acl_name, ace_name, str(tcp_data))) + acl_entry.match.src_port = tcp_src_port['port'] + if 'destination-port' in tcp_data: + tcp_dst_port : Dict = tcp_data['destination-port'] + tcp_dst_port_op = tcp_dst_port.get('operator', 'eq') + if tcp_dst_port_op != 'eq': + MSG = 'Acl({:s})/Ace({:s})/Match/Tcp({:s}) operator not supported' + raise NotImplemented(MSG.format(acl_name, ace_name, str(tcp_data))) + acl_entry.match.dst_port = tcp_dst_port['port'] + if 'flags' in tcp_data: + acl_entry.match.tcp_flags = tcp_data['flags'] + + if 'udp' in ace_matches: + # https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml + acl_entry.match.protocol = 17 + udp_data = ace_matches['udp'] + if 'source-port' in udp_data: + udp_src_port : Dict = udp_data['source-port'] + udp_src_port_op = udp_src_port.get('operator', 'eq') + if udp_src_port_op != 'eq': + MSG = 'Acl({:s})/Ace({:s})/Match/Udp({:s}) operator not supported' + raise NotImplemented(MSG.format(acl_name, ace_name, str(udp_data))) + acl_entry.match.src_port = udp_src_port['port'] + if 'destination-port' in udp_data: + udp_dst_port : Dict = udp_data['destination-port'] + udp_dst_port_op = udp_dst_port.get('operator', 'eq') + if udp_dst_port_op != 'eq': + MSG = 'Acl({:s})/Ace({:s})/Match/Udp({:s}) operator not supported' + raise NotImplemented(MSG.format(acl_name, ace_name, str(udp_data))) + acl_entry.match.dst_port = udp_dst_port['port'] + + if 'forwarding' in ace_actions: + ace_forward_action = ace_actions['forwarding'] + if ace_forward_action.startswith('ietf-access-control-list:'): + ace_forward_action = ace_forward_action.replace('ietf-access-control-list:', '') + ace_forward_action = IETF_TFS_FORWARDING_ACTION_MAPPING[ace_forward_action] + acl_entry.action.forward_action = getattr(AclForwardActionEnum, ace_forward_action) + + acl_rule_set.entries.append(acl_entry) + + return acl_config_rule + +def ietf_acl_from_config_rule_resource_value(config_rule_rv: Dict) -> Dict: + rule_set = config_rule_rv['rule_set'] + acl_entry = rule_set['entries'][0] + match_ = acl_entry['match'] + + ipv4 = Ipv4( + dscp=match_["dscp"], + source_ipv4_network=match_["src_address"], + destination_ipv4_network=match_["dst_address"] + ) + tcp = Tcp( + flags=match_["tcp_flags"], + source_port=Port(port=match_["src_port"]), + destination_port=Port(port=match_["dst_port"]) 
+ ) + matches = Matches(ipv4=ipv4, tcp=tcp) + aces = Aces(ace=[ + Ace( + matches=matches, + actions=Action( + forwarding=TFS_IETF_FORWARDING_ACTION_MAPPING[acl_entry["action"]["forward_action"]] + ) + ) + ]) + acl = Acl( + name=rule_set["name"], + type=TFS_IETF_RULE_TYPE_MAPPING[rule_set["type"]], + aces=aces + ) + acl_sets = AclSets( + acl_sets=AclSet( + acl_set=[ + Name(name=rule_set["name"]) + ] + ) + ) + ingress = Ingress(ingress=acl_sets) + interfaces = Interfaces(interface=[ + Interface( + interface_id=config_rule_rv["interface"], + ingress=ingress + ) + ]) + acls = Acls( + acl=[acl], + attachment_points=AttachmentPoints( + attachment_points=interfaces + ) + ) + ietf_acl = IETF_ACL(acls=acls) + + return ietf_acl.model_dump(by_alias=True) diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/iana-if-type@2014-05-08.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/iana-if-type@2014-05-08.yang new file mode 100644 index 0000000000000000000000000000000000000000..8d52d16f505074ed5c147b22f248bb2ceb89352a --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/iana-if-type@2014-05-08.yang @@ -0,0 +1,1508 @@ +module iana-if-type { + namespace "urn:ietf:params:xml:ns:yang:iana-if-type"; + prefix ianaift; + + import ietf-interfaces { + prefix if; + } + + organization "IANA"; + contact + " Internet Assigned Numbers Authority + + Postal: ICANN + 4676 Admiralty Way, Suite 330 + Marina del Rey, CA 90292 + + Tel: +1 310 823 9358 + <mailto:iana@iana.org>"; + + description + "This YANG module defines YANG identities for IANA-registered + interface types. + + This YANG module is maintained by IANA and reflects the + 'ifType definitions' registry. + + The latest revision of this YANG module can be obtained from + the IANA web site. + + Requests for new values should be made to IANA via + email (iana@iana.org). + + Copyright (c) 2014 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + The initial version of this YANG module is part of RFC 7224; + see the RFC itself for full legal notices."; + reference + "IANA 'ifType definitions' registry. + <http://www.iana.org/assignments/smi-numbers>"; + + revision 2014-05-08 { + description + "Initial revision."; + reference + "RFC 7224: IANA Interface Type YANG Module"; + } + + identity iana-interface-type { + base if:interface-type; + description + "This identity is used as a base for all interface types + defined in the 'ifType definitions' registry."; + } + + identity other { + base iana-interface-type; + } + identity regular1822 { + base iana-interface-type; + } + identity hdh1822 { + base iana-interface-type; + } + identity ddnX25 { + base iana-interface-type; + } + identity rfc877x25 { + base iana-interface-type; + reference + "RFC 1382 - SNMP MIB Extension for the X.25 Packet Layer"; + } + identity ethernetCsmacd { + base iana-interface-type; + description + "For all Ethernet-like interfaces, regardless of speed, + as per RFC 3635."; + reference + "RFC 3635 - Definitions of Managed Objects for the + Ethernet-like Interface Types"; + } + identity iso88023Csmacd { + base iana-interface-type; + status deprecated; + description + "Deprecated via RFC 3635.
+ Use ethernetCsmacd(6) instead."; + reference + "RFC 3635 - Definitions of Managed Objects for the + Ethernet-like Interface Types"; + } + identity iso88024TokenBus { + base iana-interface-type; + } + identity iso88025TokenRing { + base iana-interface-type; + } + identity iso88026Man { + base iana-interface-type; + } + identity starLan { + base iana-interface-type; + status deprecated; + description + "Deprecated via RFC 3635. + Use ethernetCsmacd(6) instead."; + reference + "RFC 3635 - Definitions of Managed Objects for the + Ethernet-like Interface Types"; + } + identity proteon10Mbit { + base iana-interface-type; + } + identity proteon80Mbit { + base iana-interface-type; + } + identity hyperchannel { + base iana-interface-type; + } + identity fddi { + base iana-interface-type; + reference + "RFC 1512 - FDDI Management Information Base"; + } + identity lapb { + base iana-interface-type; + reference + "RFC 1381 - SNMP MIB Extension for X.25 LAPB"; + } + identity sdlc { + base iana-interface-type; + } + identity ds1 { + base iana-interface-type; + description + "DS1-MIB."; + reference + "RFC 4805 - Definitions of Managed Objects for the + DS1, J1, E1, DS2, and E2 Interface Types"; + } + identity e1 { + base iana-interface-type; + status obsolete; + description + "Obsolete; see DS1-MIB."; + reference + "RFC 4805 - Definitions of Managed Objects for the + DS1, J1, E1, DS2, and E2 Interface Types"; + } + identity basicISDN { + base iana-interface-type; + description + "No longer used. See also RFC 2127."; + } + identity primaryISDN { + base iana-interface-type; + description + "No longer used. See also RFC 2127."; + } + identity propPointToPointSerial { + base iana-interface-type; + description + "Proprietary serial."; + } + identity ppp { + base iana-interface-type; + } + identity softwareLoopback { + base iana-interface-type; + } + identity eon { + base iana-interface-type; + description + "CLNP over IP."; + } + identity ethernet3Mbit { + base iana-interface-type; + } + identity nsip { + base iana-interface-type; + description + "XNS over IP."; + } + identity slip { + base iana-interface-type; + description + "Generic SLIP."; + } + identity ultra { + base iana-interface-type; + description + "Ultra Technologies."; + } + identity ds3 { + base iana-interface-type; + description + "DS3-MIB."; + reference + "RFC 3896 - Definitions of Managed Objects for the + DS3/E3 Interface Type"; + } + identity sip { + base iana-interface-type; + description + "SMDS, coffee."; + reference + "RFC 1694 - Definitions of Managed Objects for SMDS + Interfaces using SMIv2"; + } + identity frameRelay { + base iana-interface-type; + description + "DTE only."; + reference + "RFC 2115 - Management Information Base for Frame Relay + DTEs Using SMIv2"; + } + identity rs232 { + base iana-interface-type; + reference + "RFC 1659 - Definitions of Managed Objects for RS-232-like + Hardware Devices using SMIv2"; + } + identity para { + base iana-interface-type; + description + "Parallel-port."; + reference + "RFC 1660 - Definitions of Managed Objects for + Parallel-printer-like Hardware Devices using + SMIv2"; + } + identity arcnet { + base iana-interface-type; + description + "ARCnet."; + } + identity arcnetPlus { + base iana-interface-type; + description + "ARCnet Plus."; + } + identity atm { + base iana-interface-type; + description + "ATM cells."; + } + identity miox25 { + base iana-interface-type; + reference + "RFC 1461 - SNMP MIB extension for Multiprotocol + Interconnect over X.25"; + } + identity sonet { + base 
iana-interface-type; + description + "SONET or SDH."; + } + identity x25ple { + base iana-interface-type; + reference + "RFC 2127 - ISDN Management Information Base using SMIv2"; + } + identity iso88022llc { + base iana-interface-type; + } + identity localTalk { + base iana-interface-type; + } + identity smdsDxi { + base iana-interface-type; + } + identity frameRelayService { + base iana-interface-type; + description + "FRNETSERV-MIB."; + reference + "RFC 2954 - Definitions of Managed Objects for Frame + Relay Service"; + } + identity v35 { + base iana-interface-type; + } + identity hssi { + base iana-interface-type; + } + identity hippi { + base iana-interface-type; + } + identity modem { + base iana-interface-type; + description + "Generic modem."; + } + identity aal5 { + base iana-interface-type; + description + "AAL5 over ATM."; + } + identity sonetPath { + base iana-interface-type; + } + identity sonetVT { + base iana-interface-type; + } + identity smdsIcip { + base iana-interface-type; + description + "SMDS InterCarrier Interface."; + } + identity propVirtual { + base iana-interface-type; + description + "Proprietary virtual/internal."; + reference + "RFC 2863 - The Interfaces Group MIB"; + } + identity propMultiplexor { + base iana-interface-type; + description + "Proprietary multiplexing."; + reference + "RFC 2863 - The Interfaces Group MIB"; + } + identity ieee80212 { + base iana-interface-type; + description + "100BaseVG."; + } + identity fibreChannel { + base iana-interface-type; + description + "Fibre Channel."; + } + identity hippiInterface { + base iana-interface-type; + description + "HIPPI interfaces."; + } + identity frameRelayInterconnect { + base iana-interface-type; + status obsolete; + description + "Obsolete; use either + frameRelay(32) or frameRelayService(44)."; + } + identity aflane8023 { + base iana-interface-type; + description + "ATM Emulated LAN for 802.3."; + } + identity aflane8025 { + base iana-interface-type; + description + "ATM Emulated LAN for 802.5."; + } + identity cctEmul { + base iana-interface-type; + description + "ATM Emulated circuit."; + } + identity fastEther { + base iana-interface-type; + status deprecated; + description + "Obsoleted via RFC 3635. + ethernetCsmacd(6) should be used instead."; + reference + "RFC 3635 - Definitions of Managed Objects for the + Ethernet-like Interface Types"; + } + identity isdn { + base iana-interface-type; + description + "ISDN and X.25."; + reference + "RFC 1356 - Multiprotocol Interconnect on X.25 and ISDN + in the Packet Mode"; + } + identity v11 { + base iana-interface-type; + description + "CCITT V.11/X.21."; + } + identity v36 { + base iana-interface-type; + description + "CCITT V.36."; + } + identity g703at64k { + base iana-interface-type; + description + "CCITT G703 at 64Kbps."; + } + identity g703at2mb { + base iana-interface-type; + status obsolete; + description + "Obsolete; see DS1-MIB."; + } + identity qllc { + base iana-interface-type; + description + "SNA QLLC."; + } + identity fastEtherFX { + base iana-interface-type; + status deprecated; + description + "Obsoleted via RFC 3635. 
+ ethernetCsmacd(6) should be used instead."; + reference + "RFC 3635 - Definitions of Managed Objects for the + Ethernet-like Interface Types"; + } + identity channel { + base iana-interface-type; + description + "Channel."; + } + identity ieee80211 { + base iana-interface-type; + description + "Radio spread spectrum."; + } + identity ibm370parChan { + base iana-interface-type; + description + "IBM System 360/370 OEMI Channel."; + } + identity escon { + base iana-interface-type; + description + "IBM Enterprise Systems Connection."; + } + identity dlsw { + base iana-interface-type; + description + "Data Link Switching."; + } + identity isdns { + base iana-interface-type; + description + "ISDN S/T interface."; + } + identity isdnu { + base iana-interface-type; + description + "ISDN U interface."; + } + identity lapd { + base iana-interface-type; + description + "Link Access Protocol D."; + } + identity ipSwitch { + base iana-interface-type; + description + "IP Switching Objects."; + } + identity rsrb { + base iana-interface-type; + description + "Remote Source Route Bridging."; + } + identity atmLogical { + base iana-interface-type; + description + "ATM Logical Port."; + reference + "RFC 3606 - Definitions of Supplemental Managed Objects + for ATM Interface"; + } + identity ds0 { + base iana-interface-type; + description + "Digital Signal Level 0."; + reference + "RFC 2494 - Definitions of Managed Objects for the DS0 + and DS0 Bundle Interface Type"; + } + identity ds0Bundle { + base iana-interface-type; + description + "Group of ds0s on the same ds1."; + reference + "RFC 2494 - Definitions of Managed Objects for the DS0 + and DS0 Bundle Interface Type"; + } + identity bsc { + base iana-interface-type; + description + "Bisynchronous Protocol."; + } + identity async { + base iana-interface-type; + description + "Asynchronous Protocol."; + } + identity cnr { + base iana-interface-type; + description + "Combat Net Radio."; + } + identity iso88025Dtr { + base iana-interface-type; + description + "ISO 802.5r DTR."; + } + identity eplrs { + base iana-interface-type; + description + "Ext Pos Loc Report Sys."; + } + identity arap { + base iana-interface-type; + description + "Appletalk Remote Access Protocol."; + } + identity propCnls { + base iana-interface-type; + description + "Proprietary Connectionless Protocol."; + } + identity hostPad { + base iana-interface-type; + description + "CCITT-ITU X.29 PAD Protocol."; + } + identity termPad { + base iana-interface-type; + description + "CCITT-ITU X.3 PAD Facility."; + } + identity frameRelayMPI { + base iana-interface-type; + description + "Multiproto Interconnect over FR."; + } + identity x213 { + base iana-interface-type; + description + "CCITT-ITU X213."; + } + identity adsl { + base iana-interface-type; + description + "Asymmetric Digital Subscriber Loop."; + } + identity radsl { + base iana-interface-type; + description + "Rate-Adapt. Digital Subscriber Loop."; + } + identity sdsl { + base iana-interface-type; + description + "Symmetric Digital Subscriber Loop."; + } + identity vdsl { + base iana-interface-type; + description + "Very H-Speed Digital Subscrib. 
Loop."; + } + identity iso88025CRFPInt { + base iana-interface-type; + description + "ISO 802.5 CRFP."; + } + identity myrinet { + base iana-interface-type; + description + "Myricom Myrinet."; + } + identity voiceEM { + base iana-interface-type; + description + "Voice recEive and transMit."; + } + identity voiceFXO { + base iana-interface-type; + description + "Voice Foreign Exchange Office."; + } + identity voiceFXS { + base iana-interface-type; + description + "Voice Foreign Exchange Station."; + } + identity voiceEncap { + base iana-interface-type; + description + "Voice encapsulation."; + } + identity voiceOverIp { + base iana-interface-type; + description + "Voice over IP encapsulation."; + } + identity atmDxi { + base iana-interface-type; + description + "ATM DXI."; + } + identity atmFuni { + base iana-interface-type; + description + "ATM FUNI."; + } + identity atmIma { + base iana-interface-type; + description + "ATM IMA."; + } + identity pppMultilinkBundle { + base iana-interface-type; + description + "PPP Multilink Bundle."; + } + identity ipOverCdlc { + base iana-interface-type; + description + "IBM ipOverCdlc."; + } + identity ipOverClaw { + base iana-interface-type; + description + "IBM Common Link Access to Workstn."; + } + identity stackToStack { + base iana-interface-type; + description + "IBM stackToStack."; + } + identity virtualIpAddress { + base iana-interface-type; + description + "IBM VIPA."; + } + identity mpc { + base iana-interface-type; + description + "IBM multi-protocol channel support."; + } + identity ipOverAtm { + base iana-interface-type; + description + "IBM ipOverAtm."; + reference + "RFC 2320 - Definitions of Managed Objects for Classical IP + and ARP Over ATM Using SMIv2 (IPOA-MIB)"; + } + identity iso88025Fiber { + base iana-interface-type; + description + "ISO 802.5j Fiber Token Ring."; + } + identity tdlc { + base iana-interface-type; + description + "IBM twinaxial data link control."; + } + identity gigabitEthernet { + base iana-interface-type; + status deprecated; + description + "Obsoleted via RFC 3635. 
+ ethernetCsmacd(6) should be used instead."; + reference + "RFC 3635 - Definitions of Managed Objects for the + Ethernet-like Interface Types"; + } + identity hdlc { + base iana-interface-type; + description + "HDLC."; + } + identity lapf { + base iana-interface-type; + description + "LAP F."; + } + identity v37 { + base iana-interface-type; + description + "V.37."; + } + identity x25mlp { + base iana-interface-type; + description + "Multi-Link Protocol."; + } + identity x25huntGroup { + base iana-interface-type; + description + "X25 Hunt Group."; + } + identity transpHdlc { + base iana-interface-type; + description + "Transp HDLC."; + } + identity interleave { + base iana-interface-type; + description + "Interleave channel."; + } + identity fast { + base iana-interface-type; + description + "Fast channel."; + } + identity ip { + base iana-interface-type; + description + "IP (for APPN HPR in IP networks)."; + } + identity docsCableMaclayer { + base iana-interface-type; + description + "CATV Mac Layer."; + } + identity docsCableDownstream { + base iana-interface-type; + description + "CATV Downstream interface."; + } + identity docsCableUpstream { + base iana-interface-type; + description + "CATV Upstream interface."; + } + identity a12MppSwitch { + base iana-interface-type; + description + "Avalon Parallel Processor."; + } + identity tunnel { + base iana-interface-type; + description + "Encapsulation interface."; + } + identity coffee { + base iana-interface-type; + description + "Coffee pot."; + reference + "RFC 2325 - Coffee MIB"; + } + identity ces { + base iana-interface-type; + description + "Circuit Emulation Service."; + } + identity atmSubInterface { + base iana-interface-type; + description + "ATM Sub Interface."; + } + identity l2vlan { + base iana-interface-type; + description + "Layer 2 Virtual LAN using 802.1Q."; + } + identity l3ipvlan { + base iana-interface-type; + description + "Layer 3 Virtual LAN using IP."; + } + identity l3ipxvlan { + base iana-interface-type; + description + "Layer 3 Virtual LAN using IPX."; + } + identity digitalPowerline { + base iana-interface-type; + description + "IP over Power Lines."; + } + identity mediaMailOverIp { + base iana-interface-type; + description + "Multimedia Mail over IP."; + } + identity dtm { + base iana-interface-type; + description + "Dynamic synchronous Transfer Mode."; + } + identity dcn { + base iana-interface-type; + description + "Data Communications Network."; + } + identity ipForward { + base iana-interface-type; + description + "IP Forwarding Interface."; + } + identity msdsl { + base iana-interface-type; + description + "Multi-rate Symmetric DSL."; + } + identity ieee1394 { + base iana-interface-type; + description + "IEEE1394 High Performance Serial Bus."; + } + identity if-gsn { + base iana-interface-type; + description + "HIPPI-6400."; + } + identity dvbRccMacLayer { + base iana-interface-type; + description + "DVB-RCC MAC Layer."; + } + identity dvbRccDownstream { + base iana-interface-type; + description + "DVB-RCC Downstream Channel."; + } + identity dvbRccUpstream { + base iana-interface-type; + description + "DVB-RCC Upstream Channel."; + } + identity atmVirtual { + base iana-interface-type; + description + "ATM Virtual Interface."; + } + identity mplsTunnel { + base iana-interface-type; + description + "MPLS Tunnel Virtual Interface."; + } + identity srp { + base iana-interface-type; + description + "Spatial Reuse Protocol."; + } + identity voiceOverAtm { + base iana-interface-type; + description + "Voice 
over ATM."; + } + identity voiceOverFrameRelay { + base iana-interface-type; + description + "Voice Over Frame Relay."; + } + identity idsl { + base iana-interface-type; + description + "Digital Subscriber Loop over ISDN."; + } + identity compositeLink { + base iana-interface-type; + description + "Avici Composite Link Interface."; + } + identity ss7SigLink { + base iana-interface-type; + description + "SS7 Signaling Link."; + } + identity propWirelessP2P { + base iana-interface-type; + description + "Prop. P2P wireless interface."; + } + identity frForward { + base iana-interface-type; + description + "Frame Forward Interface."; + } + identity rfc1483 { + base iana-interface-type; + description + "Multiprotocol over ATM AAL5."; + reference + "RFC 1483 - Multiprotocol Encapsulation over ATM + Adaptation Layer 5"; + } + identity usb { + base iana-interface-type; + description + "USB Interface."; + } + identity ieee8023adLag { + base iana-interface-type; + description + "IEEE 802.3ad Link Aggregate."; + } + identity bgppolicyaccounting { + base iana-interface-type; + description + "BGP Policy Accounting."; + } + identity frf16MfrBundle { + base iana-interface-type; + description + "FRF.16 Multilink Frame Relay."; + } + identity h323Gatekeeper { + base iana-interface-type; + description + "H323 Gatekeeper."; + } + identity h323Proxy { + base iana-interface-type; + description + "H323 Voice and Video Proxy."; + } + identity mpls { + base iana-interface-type; + description + "MPLS."; + } + identity mfSigLink { + base iana-interface-type; + description + "Multi-frequency signaling link."; + } + identity hdsl2 { + base iana-interface-type; + description + "High Bit-Rate DSL - 2nd generation."; + } + identity shdsl { + base iana-interface-type; + description + "Multirate HDSL2."; + } + identity ds1FDL { + base iana-interface-type; + description + "Facility Data Link (4Kbps) on a DS1."; + } + identity pos { + base iana-interface-type; + description + "Packet over SONET/SDH Interface."; + } + identity dvbAsiIn { + base iana-interface-type; + description + "DVB-ASI Input."; + } + identity dvbAsiOut { + base iana-interface-type; + description + "DVB-ASI Output."; + } + identity plc { + base iana-interface-type; + description + "Power Line Communications."; + } + identity nfas { + base iana-interface-type; + description + "Non-Facility Associated Signaling."; + } + identity tr008 { + base iana-interface-type; + description + "TR008."; + } + identity gr303RDT { + base iana-interface-type; + description + "Remote Digital Terminal."; + } + identity gr303IDT { + base iana-interface-type; + description + "Integrated Digital Terminal."; + } + identity isup { + base iana-interface-type; + description + "ISUP."; + } + identity propDocsWirelessMaclayer { + base iana-interface-type; + description + "Cisco proprietary Maclayer."; + } + identity propDocsWirelessDownstream { + base iana-interface-type; + description + "Cisco proprietary Downstream."; + } + identity propDocsWirelessUpstream { + base iana-interface-type; + description + "Cisco proprietary Upstream."; + } + identity hiperlan2 { + base iana-interface-type; + description + "HIPERLAN Type 2 Radio Interface."; + } + identity propBWAp2Mp { + base iana-interface-type; + description + "PropBroadbandWirelessAccesspt2Multipt (use of this value + for IEEE 802.16 WMAN interfaces as per IEEE Std 802.16f + is deprecated, and ieee80216WMAN(237) should be used + instead)."; + } + identity sonetOverheadChannel { + base iana-interface-type; + description + "SONET 
Overhead Channel."; + } + identity digitalWrapperOverheadChannel { + base iana-interface-type; + description + "Digital Wrapper."; + } + identity aal2 { + base iana-interface-type; + description + "ATM adaptation layer 2."; + } + identity radioMAC { + base iana-interface-type; + description + "MAC layer over radio links."; + } + identity atmRadio { + base iana-interface-type; + description + "ATM over radio links."; + } + identity imt { + base iana-interface-type; + description + "Inter-Machine Trunks."; + } + identity mvl { + base iana-interface-type; + description + "Multiple Virtual Lines DSL."; + } + identity reachDSL { + base iana-interface-type; + description + "Long Reach DSL."; + } + identity frDlciEndPt { + base iana-interface-type; + description + "Frame Relay DLCI End Point."; + } + identity atmVciEndPt { + base iana-interface-type; + description + "ATM VCI End Point."; + } + identity opticalChannel { + base iana-interface-type; + description + "Optical Channel."; + } + identity opticalTransport { + base iana-interface-type; + description + "Optical Transport."; + } + identity propAtm { + base iana-interface-type; + description + "Proprietary ATM."; + } + identity voiceOverCable { + base iana-interface-type; + description + "Voice Over Cable Interface."; + } + identity infiniband { + base iana-interface-type; + description + "Infiniband."; + } + identity teLink { + base iana-interface-type; + description + "TE Link."; + } + identity q2931 { + base iana-interface-type; + description + "Q.2931."; + } + identity virtualTg { + base iana-interface-type; + description + "Virtual Trunk Group."; + } + identity sipTg { + base iana-interface-type; + description + "SIP Trunk Group."; + } + identity sipSig { + base iana-interface-type; + description + "SIP Signaling."; + } + identity docsCableUpstreamChannel { + base iana-interface-type; + description + "CATV Upstream Channel."; + } + identity econet { + base iana-interface-type; + description + "Acorn Econet."; + } + identity pon155 { + base iana-interface-type; + description + "FSAN 155Mb Symetrical PON interface."; + } + identity pon622 { + base iana-interface-type; + description + "FSAN 622Mb Symetrical PON interface."; + } + identity bridge { + base iana-interface-type; + description + "Transparent bridge interface."; + } + identity linegroup { + base iana-interface-type; + description + "Interface common to multiple lines."; + } + identity voiceEMFGD { + base iana-interface-type; + description + "Voice E&M Feature Group D."; + } + identity voiceFGDEANA { + base iana-interface-type; + description + "Voice FGD Exchange Access North American."; + } + identity voiceDID { + base iana-interface-type; + description + "Voice Direct Inward Dialing."; + } + identity mpegTransport { + base iana-interface-type; + description + "MPEG transport interface."; + } + identity sixToFour { + base iana-interface-type; + status deprecated; + description + "6to4 interface (DEPRECATED)."; + reference + "RFC 4087 - IP Tunnel MIB"; + } + identity gtp { + base iana-interface-type; + description + "GTP (GPRS Tunneling Protocol)."; + } + identity pdnEtherLoop1 { + base iana-interface-type; + description + "Paradyne EtherLoop 1."; + } + identity pdnEtherLoop2 { + base iana-interface-type; + description + "Paradyne EtherLoop 2."; + } + identity opticalChannelGroup { + base iana-interface-type; + description + "Optical Channel Group."; + } + identity homepna { + base iana-interface-type; + description + "HomePNA ITU-T G.989."; + } + identity gfp { + base 
iana-interface-type; + description + "Generic Framing Procedure (GFP)."; + } + identity ciscoISLvlan { + base iana-interface-type; + description + "Layer 2 Virtual LAN using Cisco ISL."; + } + identity actelisMetaLOOP { + base iana-interface-type; + description + "Acteleis proprietary MetaLOOP High Speed Link."; + } + identity fcipLink { + base iana-interface-type; + description + "FCIP Link."; + } + identity rpr { + base iana-interface-type; + description + "Resilient Packet Ring Interface Type."; + } + identity qam { + base iana-interface-type; + description + "RF Qam Interface."; + } + identity lmp { + base iana-interface-type; + description + "Link Management Protocol."; + reference + "RFC 4327 - Link Management Protocol (LMP) Management + Information Base (MIB)"; + } + identity cblVectaStar { + base iana-interface-type; + description + "Cambridge Broadband Networks Limited VectaStar."; + } + identity docsCableMCmtsDownstream { + base iana-interface-type; + description + "CATV Modular CMTS Downstream Interface."; + } + identity adsl2 { + base iana-interface-type; + status deprecated; + description + "Asymmetric Digital Subscriber Loop Version 2 + (DEPRECATED/OBSOLETED - please use adsl2plus(238) + instead)."; + reference + "RFC 4706 - Definitions of Managed Objects for Asymmetric + Digital Subscriber Line 2 (ADSL2)"; + } + identity macSecControlledIF { + base iana-interface-type; + description + "MACSecControlled."; + } + identity macSecUncontrolledIF { + base iana-interface-type; + description + "MACSecUncontrolled."; + } + identity aviciOpticalEther { + base iana-interface-type; + description + "Avici Optical Ethernet Aggregate."; + } + identity atmbond { + base iana-interface-type; + description + "atmbond."; + } + identity voiceFGDOS { + base iana-interface-type; + description + "Voice FGD Operator Services."; + } + identity mocaVersion1 { + base iana-interface-type; + description + "MultiMedia over Coax Alliance (MoCA) Interface + as documented in information provided privately to IANA."; + } + identity ieee80216WMAN { + base iana-interface-type; + description + "IEEE 802.16 WMAN interface."; + } + identity adsl2plus { + base iana-interface-type; + description + "Asymmetric Digital Subscriber Loop Version 2 - + Version 2 Plus and all variants."; + } + identity dvbRcsMacLayer { + base iana-interface-type; + description + "DVB-RCS MAC Layer."; + reference + "RFC 5728 - The SatLabs Group DVB-RCS MIB"; + } + identity dvbTdm { + base iana-interface-type; + description + "DVB Satellite TDM."; + reference + "RFC 5728 - The SatLabs Group DVB-RCS MIB"; + } + identity dvbRcsTdma { + base iana-interface-type; + description + "DVB-RCS TDMA."; + reference + "RFC 5728 - The SatLabs Group DVB-RCS MIB"; + } + identity x86Laps { + base iana-interface-type; + description + "LAPS based on ITU-T X.86/Y.1323."; + } + identity wwanPP { + base iana-interface-type; + description + "3GPP WWAN."; + } + identity wwanPP2 { + base iana-interface-type; + description + "3GPP2 WWAN."; + } + identity voiceEBS { + base iana-interface-type; + description + "Voice P-phone EBS physical interface."; + } + identity ifPwType { + base iana-interface-type; + description + "Pseudowire interface type."; + reference + "RFC 5601 - Pseudowire (PW) Management Information Base (MIB)"; + } + identity ilan { + base iana-interface-type; + description + "Internal LAN on a bridge per IEEE 802.1ap."; + } + identity pip { + base iana-interface-type; + description + "Provider Instance Port on a bridge per IEEE 802.1ah PBB."; + } + 
identity aluELP { + base iana-interface-type; + description + "Alcatel-Lucent Ethernet Link Protection."; + } + identity gpon { + base iana-interface-type; + description + "Gigabit-capable passive optical networks (G-PON) as per + ITU-T G.948."; + } + identity vdsl2 { + base iana-interface-type; + description + "Very high speed digital subscriber line Version 2 + (as per ITU-T Recommendation G.993.2)."; + reference + "RFC 5650 - Definitions of Managed Objects for Very High + Speed Digital Subscriber Line 2 (VDSL2)"; + } + identity capwapDot11Profile { + base iana-interface-type; + description + "WLAN Profile Interface."; + reference + "RFC 5834 - Control and Provisioning of Wireless Access + Points (CAPWAP) Protocol Binding MIB for + IEEE 802.11"; + } + identity capwapDot11Bss { + base iana-interface-type; + description + "WLAN BSS Interface."; + reference + "RFC 5834 - Control and Provisioning of Wireless Access + Points (CAPWAP) Protocol Binding MIB for + IEEE 802.11"; + } + identity capwapWtpVirtualRadio { + base iana-interface-type; + description + "WTP Virtual Radio Interface."; + reference + "RFC 5833 - Control and Provisioning of Wireless Access + Points (CAPWAP) Protocol Base MIB"; + } + identity bits { + base iana-interface-type; + description + "bitsport."; + } + identity docsCableUpstreamRfPort { + base iana-interface-type; + description + "DOCSIS CATV Upstream RF Port."; + } + identity cableDownstreamRfPort { + base iana-interface-type; + description + "CATV downstream RF Port."; + } + identity vmwareVirtualNic { + base iana-interface-type; + description + "VMware Virtual Network Interface."; + } + identity ieee802154 { + base iana-interface-type; + description + "IEEE 802.15.4 WPAN interface."; + reference + "IEEE 802.15.4-2006"; + } + identity otnOdu { + base iana-interface-type; + description + "OTN Optical Data Unit."; + } + identity otnOtu { + base iana-interface-type; + description + "OTN Optical channel Transport Unit."; + } + identity ifVfiType { + base iana-interface-type; + description + "VPLS Forwarding Instance Interface Type."; + } + identity g9981 { + base iana-interface-type; + description + "G.998.1 bonded interface."; + } + identity g9982 { + base iana-interface-type; + description + "G.998.2 bonded interface."; + } + identity g9983 { + base iana-interface-type; + description + "G.998.3 bonded interface."; + } + identity aluEpon { + base iana-interface-type; + description + "Ethernet Passive Optical Networks (E-PON)."; + } + identity aluEponOnu { + base iana-interface-type; + description + "EPON Optical Network Unit."; + } + identity aluEponPhysicalUni { + base iana-interface-type; + description + "EPON physical User to Network interface."; + } + identity aluEponLogicalLink { + base iana-interface-type; + description + "The emulation of a point-to-point link over the EPON + layer."; + } + identity aluGponOnu { + base iana-interface-type; + description + "GPON Optical Network Unit."; + reference + "ITU-T G.984.2"; + } + identity aluGponPhysicalUni { + base iana-interface-type; + description + "GPON physical User to Network interface."; + reference + "ITU-T G.984.2"; + } + identity vmwareNicTeam { + base iana-interface-type; + description + "VMware NIC Team."; + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-access-control-list@2019-03-04.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-access-control-list@2019-03-04.yang new file mode 100644 index 
0000000000000000000000000000000000000000..00ae58ee6a63d385c583231f0b84bcdd1bdc41bf --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-access-control-list@2019-03-04.yang @@ -0,0 +1,674 @@ +module ietf-access-control-list { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-access-control-list"; + prefix acl; + + import ietf-yang-types { + prefix yang; + reference + "RFC 6991 - Common YANG Data Types."; + } + + import ietf-packet-fields { + prefix pf; + reference + "RFC 8519 - YANG Data Model for Network Access Control + Lists (ACLs)."; + } + + import ietf-interfaces { + prefix if; + reference + "RFC 8343 - A YANG Data Model for Interface Management."; + } + + organization + "IETF NETMOD (Network Modeling) Working Group."; + + contact + "WG Web: <https://datatracker.ietf.org/wg/netmod/> + WG List: netmod@ietf.org + + Editor: Mahesh Jethanandani + mjethanandani@gmail.com + Editor: Lisa Huang + huangyi_99@yahoo.com + Editor: Sonal Agarwal + sagarwal12@gmail.com + Editor: Dana Blair + dana@blairhome.com"; + + description + "This YANG module defines a component that describes the + configuration and monitoring of Access Control Lists (ACLs). + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', + 'SHALL NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', + 'NOT RECOMMENDED', 'MAY', and 'OPTIONAL' in this document + are to be interpreted as described in BCP 14 (RFC 2119) + (RFC 8174) when, and only when, they appear in all + capitals, as shown here. + + Copyright (c) 2019 IETF Trust and the persons identified as + the document authors. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD + License set forth in Section 4.c of the IETF Trust's Legal + Provisions Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8519; see + the RFC itself for full legal notices."; + + revision 2019-03-04 { + description + "Initial version."; + reference + "RFC 8519: YANG Data Model for Network Access Control + Lists (ACLs)."; + } + + /* + * Identities + */ + /* + * Forwarding actions for a packet + */ + + identity forwarding-action { + description + "Base identity for actions in the forwarding category."; + } + + identity accept { + base forwarding-action; + description + "Accept the packet."; + } + + identity drop { + base forwarding-action; + description + "Drop packet without sending any ICMP error message."; + } + + identity reject { + base forwarding-action; + description + "Drop the packet and send an ICMP error message to the source."; + } + + /* + * Logging actions for a packet + */ + + identity log-action { + description + "Base identity for defining the destination for logging + actions."; + } + + identity log-syslog { + base log-action; + description + "System log (syslog) the information for the packet."; + } + + identity log-none { + base log-action; + description + "No logging for the packet."; + } + + /* + * ACL type identities + */ + + identity acl-base { + description + "Base Access Control List type for all Access Control List type + identifiers."; + } + + identity ipv4-acl-type { + base acl:acl-base; + if-feature "ipv4"; + description + "An ACL that matches on fields from the IPv4 header + (e.g., IPv4 destination address) and Layer 4 headers (e.g., TCP + destination port). 
An ACL of type ipv4 does not contain + matches on fields in the Ethernet header or the IPv6 header."; + } + + identity ipv6-acl-type { + base acl:acl-base; + if-feature "ipv6"; + description + "An ACL that matches on fields from the IPv6 header + (e.g., IPv6 destination address) and Layer 4 headers (e.g., TCP + destination port). An ACL of type ipv6 does not contain + matches on fields in the Ethernet header or the IPv4 header."; + } + + identity eth-acl-type { + base acl:acl-base; + if-feature "eth"; + description + "An ACL that matches on fields in the Ethernet header, + like 10/100/1000baseT or a Wi-Fi Access Control List. An ACL + of type ethernet does not contain matches on fields in the + IPv4 header, the IPv6 header, or Layer 4 headers."; + } + + identity mixed-eth-ipv4-acl-type { + base acl:eth-acl-type; + base acl:ipv4-acl-type; + if-feature "mixed-eth-ipv4"; + description + "An ACL that contains a mix of entries that match + on fields in Ethernet headers and in IPv4 headers. + Matching on Layer 4 header fields may also exist in the + list."; + } + + identity mixed-eth-ipv6-acl-type { + base acl:eth-acl-type; + base acl:ipv6-acl-type; + if-feature "mixed-eth-ipv6"; + description + "An ACL that contains a mix of entries that match on fields + in Ethernet headers and in IPv6 headers. Matching + on Layer 4 header fields may also exist in the list."; + } + + identity mixed-eth-ipv4-ipv6-acl-type { + base acl:eth-acl-type; + base acl:ipv4-acl-type; + base acl:ipv6-acl-type; + if-feature "mixed-eth-ipv4-ipv6"; + description + "An ACL that contains a mix of entries that + match on fields in Ethernet headers, IPv4 headers, and IPv6 + headers. Matching on Layer 4 header fields may also exist + in the list."; + } + + /* + * Features + */ + + /* + * Features supported by device + */ + feature match-on-eth { + description + "The device can support matching on Ethernet headers."; + } + + feature match-on-ipv4 { + description + "The device can support matching on IPv4 headers."; + } + + feature match-on-ipv6 { + description + "The device can support matching on IPv6 headers."; + } + + feature match-on-tcp { + description + "The device can support matching on TCP headers."; + } + + feature match-on-udp { + description + "The device can support matching on UDP headers."; + } + + feature match-on-icmp { + description + "The device can support matching on ICMP (v4 and v6) headers."; + } + + /* + * Header classifications combinations supported by + * device + */ + + feature eth { + if-feature "match-on-eth"; + description + "Plain Ethernet ACL supported."; + } + + feature ipv4 { + if-feature "match-on-ipv4"; + description + "Plain IPv4 ACL supported."; + } + + feature ipv6 { + if-feature "match-on-ipv6"; + description + "Plain IPv6 ACL supported."; + } + + feature mixed-eth-ipv4 { + if-feature "match-on-eth and match-on-ipv4"; + description + "Ethernet and IPv4 ACL combinations supported."; + } + + feature mixed-eth-ipv6 { + if-feature "match-on-eth and match-on-ipv6"; + description + "Ethernet and IPv6 ACL combinations supported."; + } + + feature mixed-eth-ipv4-ipv6 { + if-feature + "match-on-eth and match-on-ipv4 + and match-on-ipv6"; + description + "Ethernet, IPv4, and IPv6 ACL combinations supported."; + } + + /* + * Stats Features + */ + feature interface-stats { + description + "ACL counters are available and reported only per interface."; + } + + feature acl-aggregate-stats { + description + "ACL counters are aggregated over all interfaces and reported + only per ACL entry."; + } + + /* + * 
Attachment point features + */ + feature interface-attachment { + description + "ACLs are set on interfaces."; + } + + /* + * Typedefs + */ + typedef acl-type { + type identityref { + base acl-base; + } + description + "This type is used to refer to an ACL type."; + } + + /* + * Groupings + */ + grouping acl-counters { + description + "Common grouping for ACL counters."; + leaf matched-packets { + type yang:counter64; + config false; + description + "Count of the number of packets matching the current ACL + entry. + + An implementation should provide this counter on a + per-interface, per-ACL-entry basis if possible. + + If an implementation only supports ACL counters on a per- + entry basis (i.e., not broken out per interface), then the + value should be equal to the aggregate count across all + interfaces. + + An implementation that provides counters on a per-entry, per- + interface basis is not required to also provide an aggregate + count, e.g., per entry -- the user is expected to be able to + implement the required aggregation if such a count is + needed."; + } + + leaf matched-octets { + type yang:counter64; + config false; + description + "Count of the number of octets (bytes) matching the current + ACL entry. + + An implementation should provide this counter on a + per-interface, per-ACL-entry basis if possible. + + If an implementation only supports ACL counters per entry + (i.e., not broken out per interface), then the value + should be equal to the aggregate count across all interfaces. + + An implementation that provides counters per entry per + interface is not required to also provide an aggregate count, + e.g., per entry -- the user is expected to be able to + implement the required aggregation if such a count is needed."; + } + } + + /* + * Configuration and monitoring data nodes + */ + + container acls { + description + "This is a top-level container for Access Control Lists. + It can have one or more acl nodes."; + list acl { + key "name"; + description + "An ACL is an ordered list of ACEs. Each ACE has a + list of match criteria and a list of actions. + Since there are several kinds of ACLs implemented + with different attributes for different vendors, + this model accommodates customizing ACLs for + each kind and for each vendor."; + leaf name { + type string { + length "1..64"; + } + description + "The name of the access list. A device MAY further + restrict the length of this name; space and special + characters are not allowed."; + } + leaf type { + type acl-type; + description + "Type of ACL. Indicates the primary intended + type of match criteria (e.g., Ethernet, IPv4, IPv6, mixed, + etc.) used in the list instance."; + } + container aces { + description + "The aces container contains one or more ACE nodes."; + list ace { + key "name"; + ordered-by user; + description + "List of ACEs."; + leaf name { + type string { + length "1..64"; + } + description + "A unique name identifying this ACE."; + } + + container matches { + description + "The rules in this set determine what fields will be + matched upon before any action is taken on them. + The rules are selected based on the feature set + defined by the server and the acl-type defined. + If no matches are defined in a particular container, + then any packet will match that container. 
If no + matches are specified at all in an ACE, then any + packet will match the ACE."; + + choice l2 { + container eth { + when "derived-from-or-self(/acls/acl/type, " + + "'acl:eth-acl-type')"; + if-feature "match-on-eth"; + uses pf:acl-eth-header-fields; + description + "Rule set that matches Ethernet headers."; + } + description + "Match Layer 2 headers, for example, Ethernet + header fields."; + } + + choice l3 { + container ipv4 { + when "derived-from-or-self(/acls/acl/type, " + + "'acl:ipv4-acl-type')"; + if-feature "match-on-ipv4"; + uses pf:acl-ip-header-fields; + uses pf:acl-ipv4-header-fields; + description + "Rule set that matches IPv4 headers."; + } + + container ipv6 { + when "derived-from-or-self(/acls/acl/type, " + + "'acl:ipv6-acl-type')"; + if-feature "match-on-ipv6"; + uses pf:acl-ip-header-fields; + uses pf:acl-ipv6-header-fields; + description + "Rule set that matches IPv6 headers."; + } + description + "Choice of either IPv4 or IPv6 headers"; + } + + choice l4 { + container tcp { + if-feature "match-on-tcp"; + uses pf:acl-tcp-header-fields; + container source-port { + choice source-port { + case range-or-operator { + uses pf:port-range-or-operator; + description + "Source port definition from range or + operator."; + } + description + "Choice of source port definition using + range/operator or a choice to support future + 'case' statements, such as one enabling a + group of source ports to be referenced."; + } + description + "Source port definition."; + } + container destination-port { + choice destination-port { + case range-or-operator { + uses pf:port-range-or-operator; + description + "Destination port definition from range or + operator."; + } + description + "Choice of destination port definition using + range/operator or a choice to support future + 'case' statements, such as one enabling a + group of destination ports to be referenced."; + } + description + "Destination port definition."; + } + description + "Rule set that matches TCP headers."; + } + + container udp { + if-feature "match-on-udp"; + uses pf:acl-udp-header-fields; + container source-port { + choice source-port { + case range-or-operator { + uses pf:port-range-or-operator; + description + "Source port definition from range or + operator."; + } + description + "Choice of source port definition using + range/operator or a choice to support future + 'case' statements, such as one enabling a + group of source ports to be referenced."; + } + description + "Source port definition."; + } + container destination-port { + choice destination-port { + case range-or-operator { + uses pf:port-range-or-operator; + description + "Destination port definition from range or + operator."; + } + description + "Choice of destination port definition using + range/operator or a choice to support future + 'case' statements, such as one enabling a + group of destination ports to be referenced."; + } + description + "Destination port definition."; + } + description + "Rule set that matches UDP headers."; + } + + container icmp { + if-feature "match-on-icmp"; + uses pf:acl-icmp-header-fields; + description + "Rule set that matches ICMP headers."; + } + description + "Choice of TCP, UDP, or ICMP headers."; + } + + leaf egress-interface { + type if:interface-ref; + description + + "Egress interface. 
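When present, the ACE matches only
+               packets that the device would forward through the
+               referenced interface.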
This should not be used if this ACL + is attached as an egress ACL (or the value should + equal the interface to which the ACL is attached)."; + } + + leaf ingress-interface { + type if:interface-ref; + description + "Ingress interface. This should not be used if this ACL + is attached as an ingress ACL (or the value should + equal the interface to which the ACL is attached)."; + } + } + + container actions { + description + "Definition of actions for this ace entry."; + leaf forwarding { + type identityref { + base forwarding-action; + } + mandatory true; + description + "Specifies the forwarding action per ace entry."; + } + + leaf logging { + type identityref { + base log-action; + } + default "log-none"; + description + "Specifies the log action and destination for + matched packets. Default value is not to log the + packet."; + } + } + container statistics { + if-feature "acl-aggregate-stats"; + config false; + description + "Statistics gathered across all attachment points for the + given ACL."; + uses acl-counters; + } + } + } + } + + container attachment-points { + description + "Enclosing container for the list of + attachment points on which ACLs are set."; + /* + * Groupings + */ + grouping interface-acl { + description + "Grouping for per-interface ingress ACL data."; + container acl-sets { + description + "Enclosing container for the list of ingress ACLs on the + interface."; + list acl-set { + key "name"; + ordered-by user; + description + "List of ingress ACLs on the interface."; + leaf name { + type leafref { + path "/acls/acl/name"; + } + description + "Reference to the ACL name applied on the ingress."; + } + list ace-statistics { + if-feature "interface-stats"; + key "name"; + config false; + description + "List of ACEs."; + leaf name { + type leafref { + path "/acls/acl/aces/ace/name"; + } + description + "Name of the ace entry."; + } + uses acl-counters; + } + } + } + } + + list interface { + if-feature "interface-attachment"; + key "interface-id"; + description + "List of interfaces on which ACLs are set."; + + leaf interface-id { + type if:interface-ref; + description + "Reference to the interface id list key."; + } + + container ingress { + uses interface-acl; + description + "The ACLs applied to the ingress interface."; + } + container egress { + uses interface-acl; + description + "The ACLs applied to the egress interface."; + } + } + } + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-ethertypes@2019-03-04.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-ethertypes@2019-03-04.yang new file mode 100644 index 0000000000000000000000000000000000000000..115c05ce0644ccfab07a96f6b8e5bc31b954a5f6 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-ethertypes@2019-03-04.yang @@ -0,0 +1,381 @@ +module ietf-ethertypes { + namespace "urn:ietf:params:xml:ns:yang:ietf-ethertypes"; + prefix ethertypes; + + organization + "IETF NETMOD (Network Modeling) Working Group."; + + contact + "WG Web: <https://datatracker.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + Editor: Mahesh Jethanandani + <mjethanandani@gmail.com>"; + + description + "This module contains common definitions for the + Ethertype used by different modules. It is a + placeholder module, till such time that IEEE + starts a project to define these Ethertypes + and publishes a standard. + + At that time, this module can be deprecated. + + Copyright (c) 2019 IETF Trust and the persons identified as + the document authors. 
All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD
+     License set forth in Section 4.c of the IETF Trust's Legal
+     Provisions Relating to IETF Documents
+     (http://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of RFC 8519; see
+     the RFC itself for full legal notices.";
+
+  revision 2019-03-04 {
+    description
+      "Initial revision.";
+    reference
+      "RFC 8519: YANG Data Model for Network Access Control
+       Lists (ACLs).";
+  }
+
+  typedef ethertype {
+    type union {
+      type uint16;
+      type enumeration {
+        enum ipv4 {
+          value 2048;
+          description
+            "Internet Protocol version 4 (IPv4) with a
+             hex value of 0x0800.";
+          reference
+            "RFC 791: Internet Protocol.";
+        }
+        enum arp {
+          value 2054;
+          description
+            "Address Resolution Protocol (ARP) with a
+             hex value of 0x0806.";
+          reference
+            "RFC 826: An Ethernet Address Resolution Protocol: Or
+             Converting Network Protocol Addresses to 48.bit
+             Ethernet Address for Transmission on Ethernet
+             Hardware.";
+        }
+        enum wlan {
+          value 2114;
+          description
+            "Wake-on-LAN. Hex value of 0x0842.";
+        }
+        enum trill {
+          value 8947;
+          description
+            "Transparent Interconnection of Lots of Links.
+             Hex value of 0x22F3.";
+          reference
+            "RFC 6325: Routing Bridges (RBridges): Base Protocol
+             Specification.";
+        }
+        enum srp {
+          value 8938;
+          description
+            "Stream Reservation Protocol. Hex value of
+             0x22EA.";
+          reference
+            "IEEE 802.1Q-2011.";
+        }
+        enum decnet {
+          value 24579;
+          description
+            "DECnet Phase IV. Hex value of 0x6003.";
+        }
+        enum rarp {
+          value 32821;
+          description
+            "Reverse Address Resolution Protocol.
+             Hex value 0x8035.";
+          reference
+            "RFC 903: A Reverse Address Resolution Protocol.";
+        }
+        enum appletalk {
+          value 32923;
+          description
+            "Appletalk (Ethertalk). Hex value of 0x809B.";
+        }
+        enum aarp {
+          value 33011;
+          description
+            "Appletalk Address Resolution Protocol. Hex value
+             of 0x80F3.";
+        }
+        enum vlan {
+          value 33024;
+          description
+            "VLAN-tagged frame (IEEE 802.1Q) and Shortest Path
+             Bridging IEEE 802.1aq with Network-Network
+             Interface (NNI) compatibility. Hex value of
+             0x8100.";
+          reference
+            "IEEE 802.1Q.";
+        }
+        enum ipx {
+          value 33079;
+          description
+            "Internetwork Packet Exchange (IPX). Hex value
+             of 0x8137.";
+        }
+        enum qnx {
+          value 33284;
+          description
+            "QNX Qnet. Hex value of 0x8204.";
+        }
+        enum ipv6 {
+          value 34525;
+          description
+            "Internet Protocol Version 6 (IPv6). Hex value
+             of 0x86DD.";
+          reference
+            "RFC 8200: Internet Protocol, Version 6 (IPv6)
+             Specification
+             RFC 8201: Path MTU Discovery for IP version 6.";
+        }
+        enum efc {
+          value 34824;
+          description
+            "Ethernet flow control using pause frames.
+             Hex value of 0x8808.";
+          reference
+            "IEEE 802.1Qbb.";
+        }
+        enum esp {
+          value 34825;
+          description
+            "Ethernet Slow Protocol. Hex value of 0x8809.";
+          reference
+            "IEEE 802.3-2015.";
+        }
+        enum cobranet {
+          value 34841;
+          description
+            "CobraNet. Hex value of 0x8819.";
+        }
+        enum mpls-unicast {
+          value 34887;
+          description
+            "Multiprotocol Label Switching (MPLS) unicast traffic.
+             Hex value of 0x8847.";
+          reference
+            "RFC 3031: Multiprotocol Label Switching Architecture.";
+        }
+        enum mpls-multicast {
+          value 34888;
+          description
+            "MPLS multicast traffic. Hex value of 0x8848.";
+          reference
+            "RFC 3031: Multiprotocol Label Switching Architecture.";
+        }
+        enum pppoe-discovery {
+          value 34915;
+          description
+            "Point-to-Point Protocol over Ethernet.
Used during
+             the discovery process. Hex value of 0x8863.";
+          reference
+            "RFC 2516: A Method for Transmitting PPP Over Ethernet
+             (PPPoE).";
+        }
+        enum pppoe-session {
+          value 34916;
+          description
+            "Point-to-Point Protocol over Ethernet. Used during
+             session stage. Hex value of 0x8864.";
+          reference
+            "RFC 2516: A Method for Transmitting PPP Over Ethernet
+             (PPPoE).";
+        }
+        enum intel-ans {
+          value 34925;
+          description
+            "Intel Advanced Networking Services. Hex value of
+             0x886D.";
+        }
+        enum jumbo-frames {
+          value 34928;
+          description
+            "Jumbo frames or Ethernet frames with more than
+             1500 bytes of payload, up to 9000 bytes.";
+        }
+        enum homeplug {
+          value 34939;
+          description
+            "Family name for the various power line
+             communications. Hex value of 0x887B.";
+        }
+        enum eap {
+          value 34958;
+          description
+            "Extensible Authentication Protocol (EAP) over LAN.
+             Hex value of 0x888E.";
+          reference
+            "IEEE 802.1X.";
+        }
+        enum profinet {
+          value 34962;
+          description
+            "PROcess FIeld Net (PROFINET). Hex value of 0x8892.";
+        }
+        enum hyperscsi {
+          value 34970;
+          description
+            "Small Computer System Interface (SCSI) over Ethernet.
+             Hex value of 0x889A.";
+        }
+        enum aoe {
+          value 34978;
+          description
+            "Advanced Technology Attachment (ATA) over Ethernet.
+             Hex value of 0x88A2.";
+        }
+        enum ethercat {
+          value 34980;
+          description
+            "Ethernet for Control Automation Technology (EtherCAT).
+             Hex value of 0x88A4.";
+        }
+        enum provider-bridging {
+          value 34984;
+          description
+            "Provider Bridging (802.1ad) and Shortest Path Bridging
+             (802.1aq). Hex value of 0x88A8.";
+          reference
+            "IEEE 802.1ad and IEEE 802.1aq.";
+        }
+        enum ethernet-powerlink {
+          value 34987;
+          description
+            "Ethernet Powerlink. Hex value of 0x88AB.";
+        }
+        enum goose {
+          value 35000;
+          description
+            "Generic Object Oriented Substation Event (GOOSE).
+             Hex value of 0x88B8.";
+          reference
+            "IEC/ISO 8802-2 and 8802-3.";
+        }
+        enum gse {
+          value 35001;
+          description
+            "Generic Substation Events. Hex value of 0x88B9.";
+          reference
+            "IEC 61850.";
+        }
+        enum sv {
+          value 35002;
+          description
+            "Sampled Value Transmission. Hex value of 0x88BA.";
+          reference
+            "IEC 61850.";
+        }
+        enum lldp {
+          value 35020;
+          description
+            "Link Layer Discovery Protocol (LLDP). Hex value of
+             0x88CC.";
+          reference
+            "IEEE 802.1AB.";
+        }
+        enum sercos {
+          value 35021;
+          description
+            "Sercos Interface. Hex value of 0x88CD.";
+        }
+        enum wsmp {
+          value 35036;
+          description
+            "WAVE Short Message Protocol (WSMP). Hex value of
+             0x88DC.";
+        }
+        enum homeplug-av-mme {
+          value 35041;
+          description
+            "HomePlug AV Mobile Management Entity (MME). Hex value
+             of 0x88E1.";
+        }
+        enum mrp {
+          value 35043;
+          description
+            "Media Redundancy Protocol (MRP). Hex value of
+             0x88E3.";
+          reference
+            "IEC 62439-2.";
+        }
+        enum macsec {
+          value 35045;
+          description
+            "MAC Security. Hex value of 0x88E5.";
+          reference
+            "IEEE 802.1AE.";
+        }
+        enum pbb {
+          value 35047;
+          description
+            "Provider Backbone Bridges (PBB). Hex value of
+             0x88E7.";
+          reference
+            "IEEE 802.1ah.";
+        }
+        enum cfm {
+          value 35074;
+          description
+            "Connectivity Fault Management (CFM). Hex value of
+             0x8902.";
+          reference
+            "IEEE 802.1ag.";
+        }
+        enum fcoe {
+          value 35078;
+          description
+            "Fibre Channel over Ethernet (FCoE). Hex value of
+             0x8906.";
+          reference
+            "T11 FC-BB-5.";
+        }
+        enum fcoe-ip {
+          value 35092;
+          description
+            "FCoE Initialization Protocol. Hex value of 0x8914.";
+        }
+        enum roce {
+          value 35093;
+          description
+            "RDMA over Converged Ethernet (RoCE).
Hex value of + 0x8915."; + } + enum tte { + value 35101; + description + "TTEthernet Protocol Control Frame (TTE). Hex value + of 0x891D."; + reference + "SAE AS6802."; + } + enum hsr { + value 35119; + description + "High-availability Seamless Redundancy (HSR). Hex + value of 0x892F."; + reference + "IEC 62439-3:2016."; + } + } + } + description + "The uint16 type placeholder is defined to enable + users to manage their own ethertypes not + covered by the module. Otherwise, the module contains + enum definitions for the more commonly used ethertypes."; + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-inet-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-inet-types@2013-07-15.yang new file mode 100644 index 0000000000000000000000000000000000000000..790bafc31dd7dc3582ef1c765fe104145b8a6016 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-inet-types@2013-07-15.yang @@ -0,0 +1,459 @@ + module ietf-inet-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types"; + prefix "inet"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: <http://tools.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + WG Chair: David Kessens + <mailto:david.kessens@nsn.com> + + WG Chair: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de> + + Editor: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de>"; + + description + "This module contains a collection of generally useful derived + YANG data types for Internet addresses and related things. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - ip-address-no-zone + - ipv4-address-no-zone + - ipv6-address-no-zone"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of types related to protocol fields ***/ + + typedef ip-version { + type enumeration { + enum unknown { + value "0"; + description + "An unknown or unspecified version of the Internet + protocol."; + } + enum ipv4 { + value "1"; + description + "The IPv4 protocol as defined in RFC 791."; + } + enum ipv6 { + value "2"; + description + "The IPv6 protocol as defined in RFC 2460."; + } + } + description + "This value represents the version of the IP protocol. + + In the value set and its semantics, this type is equivalent + to the InetVersion textual convention of the SMIv2."; + reference + "RFC 791: Internet Protocol + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + typedef dscp { + type uint8 { + range "0..63"; + } + description + "The dscp type represents a Differentiated Services Code Point + that may be used for marking packets in a traffic stream. 
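+       For example, the value 46 corresponds to the Expedited
+       Forwarding (EF) per-hop behavior defined in RFC 3246.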
+ + In the value set and its semantics, this type is equivalent + to the Dscp textual convention of the SMIv2."; + reference + "RFC 3289: Management Information Base for the Differentiated + Services Architecture + RFC 2474: Definition of the Differentiated Services Field + (DS Field) in the IPv4 and IPv6 Headers + RFC 2780: IANA Allocation Guidelines For Values In + the Internet Protocol and Related Headers"; + } + + typedef ipv6-flow-label { + type uint32 { + range "0..1048575"; + } + description + "The ipv6-flow-label type represents the flow identifier or Flow + Label in an IPv6 packet header that may be used to + discriminate traffic flows. + + In the value set and its semantics, this type is equivalent + to the IPv6FlowLabel textual convention of the SMIv2."; + reference + "RFC 3595: Textual Conventions for IPv6 Flow Label + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification"; + } + + typedef port-number { + type uint16 { + range "0..65535"; + } + description + "The port-number type represents a 16-bit port number of an + Internet transport-layer protocol such as UDP, TCP, DCCP, or + SCTP. Port numbers are assigned by IANA. A current list of + all assignments is available from <http://www.iana.org/>. + + Note that the port number value zero is reserved by IANA. In + situations where the value zero does not make sense, it can + be excluded by subtyping the port-number type. + In the value set and its semantics, this type is equivalent + to the InetPortNumber textual convention of the SMIv2."; + reference + "RFC 768: User Datagram Protocol + RFC 793: Transmission Control Protocol + RFC 4960: Stream Control Transmission Protocol + RFC 4340: Datagram Congestion Control Protocol (DCCP) + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + /*** collection of types related to autonomous systems ***/ + + typedef as-number { + type uint32; + description + "The as-number type represents autonomous system numbers + which identify an Autonomous System (AS). An AS is a set + of routers under a single technical administration, using + an interior gateway protocol and common metrics to route + packets within the AS, and using an exterior gateway + protocol to route packets to other ASes. IANA maintains + the AS number space and has delegated large parts to the + regional registries. + + Autonomous system numbers were originally limited to 16 + bits. BGP extensions have enlarged the autonomous system + number space to 32 bits. This type therefore uses an uint32 + base type without a range restriction in order to support + a larger autonomous system number space. + + In the value set and its semantics, this type is equivalent + to the InetAutonomousSystemNumber textual convention of + the SMIv2."; + reference + "RFC 1930: Guidelines for creation, selection, and registration + of an Autonomous System (AS) + RFC 4271: A Border Gateway Protocol 4 (BGP-4) + RFC 4001: Textual Conventions for Internet Network Addresses + RFC 6793: BGP Support for Four-Octet Autonomous System (AS) + Number Space"; + } + + /*** collection of types related to IP addresses and hostnames ***/ + + typedef ip-address { + type union { + type inet:ipv4-address; + type inet:ipv6-address; + } + description + "The ip-address type represents an IP address and is IP + version neutral. The format of the textual representation + implies the IP version. 
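For example,
+        '192.0.2.1' is interpreted as an IPv4 address, while
+        '2001:db8::1' is interpreted as an IPv6 address.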
This type supports scoped addresses + by allowing zone identifiers in the address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '(%[\p{N}\p{L}]+)?'; + } + description + "The ipv4-address type represents an IPv4 address in + dotted-quad notation. The IPv4 address may include a zone + index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format for the zone index is the numerical + format"; + } + + typedef ipv6-address { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(%[\p{N}\p{L}]+)?'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(%.+)?'; + } + description + "The ipv6-address type represents an IPv6 address in full, + mixed, shortened, and shortened-mixed notation. The IPv6 + address may include a zone index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format of IPv6 addresses uses the textual + representation defined in Section 4 of RFC 5952. The + canonical format for the zone index is the numerical + format as described in Section 11.2 of RFC 4007."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-address-no-zone { + type union { + type inet:ipv4-address-no-zone; + type inet:ipv6-address-no-zone; + } + description + "The ip-address-no-zone type represents an IP address and is + IP version neutral. The format of the textual representation + implies the IP version. This type does not support scoped + addresses since it does not allow zone identifiers in the + address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address-no-zone { + type inet:ipv4-address { + pattern '[0-9\.]*'; + } + description + "An IPv4 address without a zone index. This type, derived from + ipv4-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + } + + typedef ipv6-address-no-zone { + type inet:ipv6-address { + pattern '[0-9a-fA-F:\.]*'; + } + description + "An IPv6 address without a zone index. This type, derived from + ipv6-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-prefix { + type union { + type inet:ipv4-prefix; + type inet:ipv6-prefix; + } + description + "The ip-prefix type represents an IP prefix and is IP + version neutral. 
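For example,
+        '192.0.2.0/24' denotes an IPv4 prefix, while
+        '2001:db8::/32' denotes an IPv6 prefix.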
The format of the textual representations + implies the IP version."; + } + + typedef ipv4-prefix { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '/(([0-9])|([1-2][0-9])|(3[0-2]))'; + } + description + "The ipv4-prefix type represents an IPv4 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 32. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The canonical format of an IPv4 prefix has all bits of + the IPv4 address set to zero that are not part of the + IPv4 prefix."; + } + + typedef ipv6-prefix { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(/.+)'; + } + + description + "The ipv6-prefix type represents an IPv6 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 128. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The IPv6 address should have all bits that do not belong + to the prefix set to zero. + + The canonical format of an IPv6 prefix has all bits of + the IPv6 address set to zero that are not part of the + IPv6 prefix. Furthermore, the IPv6 address is represented + as defined in Section 4 of RFC 5952."; + reference + "RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + /*** collection of domain name and URI types ***/ + + typedef domain-name { + type string { + pattern + '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*' + + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)' + + '|\.'; + length "1..253"; + } + description + "The domain-name type represents a DNS domain name. The + name SHOULD be fully qualified whenever possible. + + Internet domain names are only loosely specified. Section + 3.5 of RFC 1034 recommends a syntax (modified in Section + 2.1 of RFC 1123). The pattern above is intended to allow + for current practice in domain name use, and some possible + future expansion. It is designed to hold various types of + domain names, including names used for A or AAAA records + (host names) and other records, such as SRV records. Note + that Internet host names have a stricter syntax (described + in RFC 952) than the DNS recommendations in RFCs 1034 and + 1123, and that systems that want to store host names in + schema nodes using the domain-name type are recommended to + adhere to this stricter standard to ensure interoperability. + + The encoding of DNS names in the DNS protocol is limited + to 255 characters. Since the encoding consists of labels + prefixed by a length bytes and there is a trailing NULL + byte, only 253 characters can appear in the textual dotted + notation. + + The description clause of schema nodes using the domain-name + type MUST describe when and how these names are resolved to + IP addresses. 
Note that the resolution of a domain-name value + may require to query multiple DNS records (e.g., A for IPv4 + and AAAA for IPv6). The order of the resolution process and + which DNS record takes precedence can either be defined + explicitly or may depend on the configuration of the + resolver. + + Domain-name values use the US-ASCII encoding. Their canonical + format uses lowercase US-ASCII characters. Internationalized + domain names MUST be A-labels as per RFC 5890."; + reference + "RFC 952: DoD Internet Host Table Specification + RFC 1034: Domain Names - Concepts and Facilities + RFC 1123: Requirements for Internet Hosts -- Application + and Support + RFC 2782: A DNS RR for specifying the location of services + (DNS SRV) + RFC 5890: Internationalized Domain Names in Applications + (IDNA): Definitions and Document Framework"; + } + + typedef host { + type union { + type inet:ip-address; + type inet:domain-name; + } + description + "The host type represents either an IP address or a DNS + domain name."; + } + + typedef uri { + type string; + description + "The uri type represents a Uniform Resource Identifier + (URI) as defined by STD 66. + + Objects using the uri type MUST be in US-ASCII encoding, + and MUST be normalized as described by RFC 3986 Sections + 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary + percent-encoding is removed, and all case-insensitive + characters are set to lowercase except for hexadecimal + digits, which are normalized to uppercase as described in + Section 6.2.2.1. + + The purpose of this normalization is to help provide + unique URIs. Note that this normalization is not + sufficient to provide uniqueness. Two URIs that are + textually distinct after this normalization may still be + equivalent. + + Objects using the uri type may restrict the schemes that + they permit. For example, 'data:' and 'urn:' schemes + might not be appropriate. + + A zero-length URI is not a valid URI. This can be used to + express 'URI absent' where required. + + In the value set and its semantics, this type is equivalent + to the Uri SMIv2 textual convention defined in RFC 5017."; + reference + "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax + RFC 3305: Report from the Joint W3C/IETF URI Planning Interest + Group: Uniform Resource Identifiers (URIs), URLs, + and Uniform Resource Names (URNs): Clarifications + and Recommendations + RFC 5017: MIB Textual Conventions for Uniform Resource + Identifiers (URIs)"; + } + + } diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-interfaces@2018-02-20.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-interfaces@2018-02-20.yang new file mode 100644 index 0000000000000000000000000000000000000000..e53675b9d3caab79e15e1d7453d118df8c177089 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-interfaces@2018-02-20.yang @@ -0,0 +1,1123 @@ +module ietf-interfaces { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces"; + prefix if; + + import ietf-yang-types { + prefix yang; + } + + organization + "IETF NETMOD (Network Modeling) Working Group"; + + contact + "WG Web: <https://datatracker.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + Editor: Martin Bjorklund + <mailto:mbj@tail-f.com>"; + + description + "This module contains a collection of YANG definitions for + managing network interfaces. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8343; see + the RFC itself for full legal notices."; + + revision 2018-02-20 { + description + "Updated to support NMDA."; + reference + "RFC 8343: A YANG Data Model for Interface Management"; + } + + revision 2014-05-08 { + description + "Initial revision."; + reference + "RFC 7223: A YANG Data Model for Interface Management"; + } + + /* + * Typedefs + */ + + typedef interface-ref { + type leafref { + path "/if:interfaces/if:interface/if:name"; + } + description + "This type is used by data models that need to reference + interfaces."; + } + + /* + * Identities + */ + + identity interface-type { + description + "Base identity from which specific interface types are + derived."; + } + + /* + * Features + */ + + feature arbitrary-names { + description + "This feature indicates that the device allows user-controlled + interfaces to be named arbitrarily."; + } + feature pre-provisioning { + description + "This feature indicates that the device supports + pre-provisioning of interface configuration, i.e., it is + possible to configure an interface whose physical interface + hardware is not present on the device."; + } + feature if-mib { + description + "This feature indicates that the device implements + the IF-MIB."; + reference + "RFC 2863: The Interfaces Group MIB"; + } + + /* + * Data nodes + */ + + container interfaces { + description + "Interface parameters."; + + list interface { + key "name"; + + description + "The list of interfaces on the device. + + The status of an interface is available in this list in the + operational state. If the configuration of a + system-controlled interface cannot be used by the system + (e.g., the interface hardware present does not match the + interface type), then the configuration is not applied to + the system-controlled interface shown in the operational + state. If the configuration of a user-controlled interface + cannot be used by the system, the configured interface is + not instantiated in the operational state. + + System-controlled interfaces created by the system are + always present in this list in the operational state, + whether or not they are configured."; + + leaf name { + type string; + description + "The name of the interface. + + A device MAY restrict the allowed values for this leaf, + possibly depending on the type of the interface. + For system-controlled interfaces, this leaf is the + device-specific name of the interface. + + If a client tries to create configuration for a + system-controlled interface that is not present in the + operational state, the server MAY reject the request if + the implementation does not support pre-provisioning of + interfaces or if the name refers to an interface that can + never exist in the system. A Network Configuration + Protocol (NETCONF) server MUST reply with an rpc-error + with the error-tag 'invalid-value' in this case. + + If the device supports pre-provisioning of interface + configuration, the 'pre-provisioning' feature is + advertised. + + If the device allows arbitrarily named user-controlled + interfaces, the 'arbitrary-names' feature is advertised. 
+ + When a configured user-controlled interface is created by + the system, it is instantiated with the same name in the + operational state. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf description { + type string; + description + "A textual description of the interface. + + A server implementation MAY map this leaf to the ifAlias + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifAlias. The definition of + such a mechanism is outside the scope of this document. + + Since ifAlias is defined to be stored in non-volatile + storage, the MIB implementation MUST map ifAlias to the + value of 'description' in the persistently stored + configuration."; + reference + "RFC 2863: The Interfaces Group MIB - ifAlias"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + description + "The type of the interface. + + When an interface entry is created, a server MAY + initialize the type leaf with a valid value, e.g., if it + is possible to derive the type from the name of the + interface. + + If a client tries to set the type of an interface to a + value that can never be used by the system, e.g., if the + type is not supported or if the type does not match the + name of the interface, the server MUST reject the request. + A NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf enabled { + type boolean; + default "true"; + description + "This leaf contains the configured, desired state of the + interface. + + Systems that implement the IF-MIB use the value of this + leaf in the intended configuration to set + IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry + has been initialized, as described in RFC 2863. + + Changes in this leaf in the intended configuration are + reflected in ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf link-up-down-trap-enable { + if-feature if-mib; + type enumeration { + enum enabled { + value 1; + description + "The device will generate linkUp/linkDown SNMP + notifications for this interface."; + } + enum disabled { + value 2; + description + "The device will not generate linkUp/linkDown SNMP + notifications for this interface."; + } + } + description + "Controls whether linkUp/linkDown SNMP notifications + should be generated for this interface. + + If this node is not configured, the value 'enabled' is + operationally used by the server for interfaces that do + not operate on top of any other interface (i.e., there are + no 'lower-layer-if' entries), and 'disabled' otherwise."; + reference + "RFC 2863: The Interfaces Group MIB - + ifLinkUpDownTrapEnable"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + config false; + mandatory true; + description + "The desired state of the interface. 
+ + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + config false; + mandatory true; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + config false; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + config false; + mandatory true; + description + "The ifIndex value for the ifEntry represented by this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + config false; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-ref; + config false; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-ref; + config false; + + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + config false; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + config false; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. One possible reason + for discarding such a packet could be to free up buffer + space. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + + } + } + + /* + * Legacy typedefs + */ + + typedef interface-state-ref { + type leafref { + path "/if:interfaces-state/if:interface/if:name"; + } + status deprecated; + description + "This type is used by data models that need to reference + the operationally present interfaces."; + } + + /* + * Legacy operational state data nodes + */ + + container interfaces-state { + config false; + status deprecated; + description + "Data nodes for the operational state of interfaces."; + + list interface { + key "name"; + status deprecated; + + description + "The list of interfaces on the device. + + System-controlled interfaces created by the system are + always present in this list, whether or not they are + configured."; + + leaf name { + type string; + status deprecated; + description + "The name of the interface. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + status deprecated; + description + "The type of the interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + mandatory true; + status deprecated; + description + "The desired state of the interface. + + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. 
No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + mandatory true; + status deprecated; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + status deprecated; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + mandatory true; + status deprecated; + description + "The ifIndex value for the ifEntry represented by this + interface."; + + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + status deprecated; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + status deprecated; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + status deprecated; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + status deprecated; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + status deprecated; + + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + status deprecated; + + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + status deprecated; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. 
One possible reason + for discarding such a packet could be to free up buffer + space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + } + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-packet-fields@2019-03-04.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-packet-fields@2019-03-04.yang new file mode 100644 index 0000000000000000000000000000000000000000..2fb797bd87bf4ed825f83ec788df707b94c5f68b --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-packet-fields@2019-03-04.yang @@ -0,0 +1,576 @@ +module ietf-packet-fields { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-packet-fields"; + prefix packet-fields; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991 - Common YANG Data Types."; + } + + import ietf-yang-types { + prefix yang; + reference + "RFC 6991 - Common YANG Data Types."; + } + + import ietf-ethertypes { + prefix eth; + reference + "RFC 8519 - YANG Data Model for Network Access Control + Lists (ACLs)."; + } + + organization + "IETF NETMOD (Network Modeling) Working Group."; + + contact + "WG Web: <https://datatracker.ietf.org/wg/netmod/> + WG List: netmod@ietf.org + + Editor: Mahesh Jethanandani + mjethanandani@gmail.com + Editor: Lisa Huang + huangyi_99@yahoo.com + Editor: Sonal Agarwal + sagarwal12@gmail.com + Editor: Dana Blair + dana@blairhome.com"; + + description + "This YANG module defines groupings that are used by + the ietf-access-control-list YANG module. Their usage + is not limited to ietf-access-control-list and can be + used anywhere as applicable. + + Copyright (c) 2019 IETF Trust and the persons identified as + the document authors. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD + License set forth in Section 4.c of the IETF Trust's Legal + Provisions Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8519; see + the RFC itself for full legal notices."; + + revision 2019-03-04 { + description + "Initial version."; + reference + "RFC 8519: YANG Data Model for Network Access Control + Lists (ACLs)."; + } + + /* + * Typedefs + */ + typedef operator { + type enumeration { + enum lte { + description + "Less than or equal to."; + } + enum gte { + description + "Greater than or equal to."; + } + enum eq { + description + "Equal to."; + } + enum neq { + description + "Not equal to."; + } + } + description + "The source and destination port range definitions + can be further qualified using an operator. 
An + operator is needed only if the lower-port is specified + and the upper-port is not specified. The operator + therefore further qualifies the lower-port only."; + } + + /* + * Groupings + */ + grouping port-range-or-operator { + choice port-range-or-operator { + case range { + leaf lower-port { + type inet:port-number; + must '. <= ../upper-port' { + error-message + "The lower-port must be less than or equal to + the upper-port."; + } + mandatory true; + description + "Lower boundary for a port."; + } + leaf upper-port { + type inet:port-number; + mandatory true; + description + "Upper boundary for a port."; + } + } + case operator { + leaf operator { + type operator; + default "eq"; + description + "Operator to be applied on the port below."; + } + leaf port { + type inet:port-number; + mandatory true; + description + "Port number along with the operator on which to + match."; + } + } + description + "Choice of specifying a port range or a single + port along with an operator."; + } + description + "Grouping for port definitions in the form of a + choice statement."; + } + + grouping acl-ip-header-fields { + description + "IP header fields common to IPv4 and IPv6"; + reference + "RFC 791: Internet Protocol."; + + leaf dscp { + type inet:dscp; + description + "Differentiated Services Code Point."; + reference + "RFC 2474: Definition of the Differentiated Services + Field (DS Field) in the IPv4 and IPv6 + Headers."; + } + + leaf ecn { + type uint8 { + range "0..3"; + } + description + "Explicit Congestion Notification."; + reference + "RFC 3168: The Addition of Explicit Congestion + Notification (ECN) to IP."; + } + + leaf length { + type uint16; + description + "In the IPv4 header field, this field is known as the Total + Length. Total Length is the length of the datagram, measured + in octets, including internet header and data. + + In the IPv6 header field, this field is known as the Payload + Length, which is the length of the IPv6 payload, i.e., the rest + of the packet following the IPv6 header, in octets."; + reference + "RFC 791: Internet Protocol + RFC 8200: Internet Protocol, Version 6 (IPv6) Specification."; + } + leaf ttl { + type uint8; + description + "This field indicates the maximum time the datagram is allowed + to remain in the internet system. If this field contains the + value zero, then the datagram must be dropped. + + In IPv6, this field is known as the Hop Limit."; + reference + "RFC 791: Internet Protocol + RFC 8200: Internet Protocol, Version 6 (IPv6) Specification."; + } + leaf protocol { + type uint8; + description + "Internet Protocol number. Refers to the protocol of the + payload. In IPv6, this field is known as 'next-header', + and if extension headers are present, the protocol is + present in the 'upper-layer' header."; + reference + "RFC 791: Internet Protocol + RFC 8200: Internet Protocol, Version 6 (IPv6) Specification."; + } + } + + grouping acl-ipv4-header-fields { + description + "Fields in the IPv4 header."; + leaf ihl { + type uint8 { + range "5..60"; + } + description + "In an IPv4 header field, the Internet Header Length (IHL) is + the length of the internet header in 32-bit words and + thus points to the beginning of the data. Note that the + minimum value for a correct header is 5."; + } + leaf flags { + type bits { + bit reserved { + position 0; + description + "Reserved. 
Must be zero."; + } + bit fragment { + position 1; + description + "Setting the value to 0 indicates may fragment, while + setting the value to 1 indicates do not fragment."; + } + bit more { + position 2; + description + "Setting the value to 0 indicates this is the last fragment, + and setting the value to 1 indicates more fragments are + coming."; + } + } + description + "Bit definitions for the Flags field in the IPv4 header."; + } + leaf offset { + type uint16 { + range "20..65535"; + } + description + "The fragment offset is measured in units of 8 octets (64 bits). + The first fragment has offset zero. The length is 13 bits"; + } + leaf identification { + type uint16; + description + "An identifying value assigned by the sender to aid in + assembling the fragments of a datagram."; + } + + choice destination-network { + case destination-ipv4-network { + leaf destination-ipv4-network { + type inet:ipv4-prefix; + description + "Destination IPv4 address prefix."; + } + } + description + "Choice of specifying a destination IPv4 address or + referring to a group of IPv4 destination addresses."; + } + + choice source-network { + case source-ipv4-network { + leaf source-ipv4-network { + type inet:ipv4-prefix; + description + "Source IPv4 address prefix."; + } + } + description + "Choice of specifying a source IPv4 address or + referring to a group of IPv4 source addresses."; + } + } + + grouping acl-ipv6-header-fields { + description + "Fields in the IPv6 header."; + + choice destination-network { + case destination-ipv6-network { + leaf destination-ipv6-network { + type inet:ipv6-prefix; + description + "Destination IPv6 address prefix."; + } + } + description + "Choice of specifying a destination IPv6 address + or referring to a group of IPv6 destination + addresses."; + } + + choice source-network { + case source-ipv6-network { + leaf source-ipv6-network { + type inet:ipv6-prefix; + description + "Source IPv6 address prefix."; + } + } + description + "Choice of specifying a source IPv6 address or + referring to a group of IPv6 source addresses."; + } + + leaf flow-label { + type inet:ipv6-flow-label; + description + "IPv6 Flow label."; + } + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation."; + } + + grouping acl-eth-header-fields { + description + "Fields in the Ethernet header."; + leaf destination-mac-address { + type yang:mac-address; + description + "Destination IEEE 802 Media Access Control (MAC) + address."; + } + leaf destination-mac-address-mask { + type yang:mac-address; + description + "Destination IEEE 802 MAC address mask."; + } + leaf source-mac-address { + type yang:mac-address; + description + "Source IEEE 802 MAC address."; + } + leaf source-mac-address-mask { + type yang:mac-address; + description + "Source IEEE 802 MAC address mask."; + } + leaf ethertype { + type eth:ethertype; + description + "The Ethernet Type (or Length) value represented + in the canonical order defined by IEEE 802. 
+ The canonical representation uses lowercase + characters."; + reference + "IEEE 802-2014, Clause 9.2."; + } + reference + "IEEE 802: IEEE Standard for Local and Metropolitan + Area Networks: Overview and Architecture."; + } + + grouping acl-tcp-header-fields { + description + "Collection of TCP header fields that can be used to + set up a match filter."; + leaf sequence-number { + type uint32; + description + "Sequence number that appears in the packet."; + } + leaf acknowledgement-number { + type uint32; + description + "The acknowledgement number that appears in the + packet."; + } + leaf data-offset { + type uint8 { + range "5..15"; + } + description + "Specifies the size of the TCP header in 32-bit + words. The minimum size header is 5 words and + the maximum is 15 words; thus, this gives a + minimum size of 20 bytes and a maximum of 60 + bytes, allowing for up to 40 bytes of options + in the header."; + } + leaf reserved { + type uint8; + description + "Reserved for future use."; + } + leaf flags { + type bits { + bit cwr { + position 1; + description + "The Congestion Window Reduced (CWR) flag is set + by the sending host to indicate that it received + a TCP segment with the ECN-Echo (ECE) flag set + and had responded in the congestion control + mechanism."; + reference + "RFC 3168: The Addition of Explicit Congestion + Notification (ECN) to IP."; + } + bit ece { + position 2; + description + "ECN-Echo has a dual role, depending on the value + of the SYN flag. It indicates the following: if + the SYN flag is set (1), the TCP peer is ECN + capable, and if the SYN flag is clear (0), a packet + with the Congestion Experienced flag set (ECN=11) + in the IP header was received during normal + transmission (added to the header by RFC 3168). + This serves as an indication of network congestion + (or impending congestion) to the TCP sender."; + reference + "RFC 3168: The Addition of Explicit Congestion + Notification (ECN) to IP."; + } + bit urg { + position 3; + description + "Indicates that the Urgent Pointer field is significant."; + } + bit ack { + position 4; + description + "Indicates that the Acknowledgement field is significant. + All packets after the initial SYN packet sent by the + client should have this flag set."; + } + bit psh { + position 5; + description + "Push function. Asks to push the buffered data to the + receiving application."; + } + bit rst { + position 6; + description + "Reset the connection."; + } + bit syn { + position 7; + description + "Synchronize sequence numbers. Only the first packet + sent from each end should have this flag set. Some + other flags and fields change meaning based on this + flag, and some are only valid for when it is set, + and others when it is clear."; + } + bit fin { + position 8; + description + "Last package from the sender."; + } + } + description + "Also known as Control Bits. 
Contains nine 1-bit flags."; + reference + "RFC 793: Transmission Control Protocol."; + } + leaf window-size { + type uint16; + units "bytes"; + description + "The size of the receive window, which specifies + the number of window size units beyond the segment + identified by the sequence number in the Acknowledgement + field that the sender of this segment is currently + willing to receive."; + } + leaf urgent-pointer { + type uint16; + description + "This field is an offset from the sequence number + indicating the last urgent data byte."; + } + leaf options { + type binary { + length "1..40"; + } + description + "The length of this field is determined by the + Data Offset field. Options have up to three + fields: Option-Kind (1 byte), Option-Length + (1 byte), and Option-Data (variable). The Option-Kind + field indicates the type of option and is the + only field that is not optional. Depending on + what kind of option we are dealing with, + the next two fields may be set: the Option-Length + field indicates the total length of the option, + and the Option-Data field contains the value of + the option, if applicable."; + } + } + + grouping acl-udp-header-fields { + description + "Collection of UDP header fields that can be used + to set up a match filter."; + leaf length { + type uint16; + description + "A field that specifies the length in bytes of + the UDP header and UDP data. The minimum + length is 8 bytes because that is the length of + the header. The field size sets a theoretical + limit of 65,535 bytes (8-byte header plus 65,527 + bytes of data) for a UDP datagram. However, the + actual limit for the data length, which is + imposed by the underlying IPv4 protocol, is + 65,507 bytes (65,535 minus 8-byte UDP header + minus 20-byte IP header). + + In IPv6 jumbograms, it is possible to have + UDP packets of a size greater than 65,535 bytes. + RFC 2675 specifies that the Length field is set + to zero if the length of the UDP header plus + UDP data is greater than 65,535."; + } + } + + grouping acl-icmp-header-fields { + description + "Collection of ICMP header fields that can be + used to set up a match filter."; + leaf type { + type uint8; + description + "Also known as control messages."; + reference + "RFC 792: Internet Control Message Protocol + RFC 4443: Internet Control Message Protocol (ICMPv6) + for Internet Protocol Version 6 (IPv6) + Specification."; + } + leaf code { + type uint8; + description + "ICMP subtype. Also known as control messages."; + reference + "RFC 792: Internet Control Message Protocol + RFC 4443: Internet Control Message Protocol (ICMPv6) + for Internet Protocol Version 6 (IPv6) + Specification."; + } + leaf rest-of-header { + type binary; + description + "Unbounded in length, the contents vary based on the + ICMP type and code. 
Also referred to as 'Message Body' + in ICMPv6."; + reference + "RFC 792: Internet Control Message Protocol + RFC 4443: Internet Control Message Protocol (ICMPv6) + for Internet Protocol Version 6 (IPv6) + Specification."; + } + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-yang-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-yang-types@2013-07-15.yang new file mode 100644 index 0000000000000000000000000000000000000000..956562a7b342055127961732d8bde4be21c80d7d --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-yang-types@2013-07-15.yang @@ -0,0 +1,475 @@ + module ietf-yang-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types"; + prefix "yang"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: <http://tools.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + WG Chair: David Kessens + <mailto:david.kessens@nsn.com> + + WG Chair: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de> + + Editor: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de>"; + + description + "This module contains a collection of generally useful derived + YANG data types. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - yang-identifier + - hex-string + - uuid + - dotted-quad"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of counter and gauge types ***/ + + typedef counter32 { + type uint32; + description + "The counter32 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter32 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter32 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter32. 
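A collector never interprets one such counter sample in isolation: it differences successive readings modulo 2^32 and discards the delta whenever the associated discontinuity marker (for instance, the 'discontinuity-time' leaf of the ietf-interfaces statistics container above) changes between polls. A minimal sketch of that rule in plain Python (hypothetical helper, not part of this patch):

    # Sketch: safe delta between two counter32 samples, honouring the
    # wrap-around and discontinuity semantics described above.
    COUNTER32_MOD = 2 ** 32

    def counter32_delta(prev_value, prev_disc_time, curr_value, curr_disc_time):
        # A changed discontinuity-time means the counter was reset or
        # re-initialized; the difference between the samples is meaningless.
        if prev_disc_time != curr_disc_time:
            return None
        # Modular subtraction absorbs a single wrap past 2^32 - 1.
        return (curr_value - prev_value) % COUNTER32_MOD

    assert counter32_delta(4294967290, 't0', 5, 't0') == 11
    assert counter32_delta(100, 't0', 50, 't1') is None
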
+ + In the value set and its semantics, this type is equivalent + to the Counter32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter32 { + type yang:counter32; + default "0"; + description + "The zero-based-counter32 type represents a counter32 + that has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter32 textual convention of the SMIv2."; + reference + "RFC 4502: Remote Network Monitoring Management Information + Base Version 2"; + } + + typedef counter64 { + type uint64; + description + "The counter64 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter64 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter64 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter64. + + In the value set and its semantics, this type is equivalent + to the Counter64 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter64 { + type yang:counter64; + default "0"; + description + "The zero-based-counter64 type represents a counter64 that + has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. 
+ + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter64 textual convention of the SMIv2."; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + typedef gauge32 { + type uint32; + description + "The gauge32 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^32-1 (4294967295 decimal), and + the minimum value cannot be smaller than 0. The value of + a gauge32 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge32 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the Gauge32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef gauge64 { + type uint64; + description + "The gauge64 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^64-1 (18446744073709551615), and + the minimum value cannot be smaller than 0. The value of + a gauge64 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge64 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the CounterBasedGauge64 SMIv2 textual convention defined + in RFC 2856"; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + /*** collection of identifier-related types ***/ + + typedef object-identifier { + type string { + pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))' + + '(\.(0|([1-9]\d*)))*'; + } + description + "The object-identifier type represents administratively + assigned names in a registration-hierarchical-name tree. + + Values of this type are denoted as a sequence of numerical + non-negative sub-identifier values. Each sub-identifier + value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers + are separated by single dots and without any intermediate + whitespace. + + The ASN.1 standard restricts the value space of the first + sub-identifier to 0, 1, or 2. Furthermore, the value space + of the second sub-identifier is restricted to the range + 0 to 39 if the first sub-identifier is 0 or 1. Finally, + the ASN.1 standard requires that an object identifier + has always at least two sub-identifiers. The pattern + captures these restrictions. + + Although the number of sub-identifiers is not limited, + module designers should realize that there may be + implementations that stick with the SMIv2 limit of 128 + sub-identifiers. + + This type is a superset of the SMIv2 OBJECT IDENTIFIER type + since it is not restricted to 128 sub-identifiers. 
Hence, + this type SHOULD NOT be used to represent the SMIv2 OBJECT + IDENTIFIER type; the object-identifier-128 type SHOULD be + used instead."; + reference + "ISO9834-1: Information technology -- Open Systems + Interconnection -- Procedures for the operation of OSI + Registration Authorities: General procedures and top + arcs of the ASN.1 Object Identifier tree"; + } + + typedef object-identifier-128 { + type object-identifier { + pattern '\d*(\.\d*){1,127}'; + } + description + "This type represents object-identifiers restricted to 128 + sub-identifiers. + + In the value set and its semantics, this type is equivalent + to the OBJECT IDENTIFIER type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef yang-identifier { + type string { + length "1..max"; + pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*'; + pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*'; + } + description + "A YANG identifier string as defined by the 'identifier' + rule in Section 12 of RFC 6020. An identifier must + start with an alphabetic character or an underscore + followed by an arbitrary sequence of alphabetic or + numeric characters, underscores, hyphens, or dots. + + A YANG identifier MUST NOT start with any possible + combination of the lowercase or uppercase character + sequence 'xml'."; + reference + "RFC 6020: YANG - A Data Modeling Language for the Network + Configuration Protocol (NETCONF)"; + } + + /*** collection of types related to date and time***/ + + typedef date-and-time { + type string { + pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?' + + '(Z|[\+\-]\d{2}:\d{2})'; + } + description + "The date-and-time type is a profile of the ISO 8601 + standard for representation of dates and times using the + Gregorian calendar. The profile is defined by the + date-time production in Section 5.6 of RFC 3339. + + The date-and-time type is compatible with the dateTime XML + schema type with the following notable exceptions: + + (a) The date-and-time type does not allow negative years. + + (b) The date-and-time time-offset -00:00 indicates an unknown + time zone (see RFC 3339) while -00:00 and +00:00 and Z + all represent the same time zone in dateTime. + + (c) The canonical format (see below) of data-and-time values + differs from the canonical format used by the dateTime XML + schema type, which requires all times to be in UTC using + the time-offset 'Z'. + + This type is not equivalent to the DateAndTime textual + convention of the SMIv2 since RFC 3339 uses a different + separator between full-date and full-time and provides + higher resolution of time-secfrac. + + The canonical format for date-and-time values with a known time + zone uses a numeric time zone offset that is calculated using + the device's configured known offset to UTC time. A change of + the device's offset to UTC time will cause date-and-time values + to change accordingly. Such changes might happen periodically + in case a server follows automatically daylight saving time + (DST) time zone offset changes. 
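The YangHandler added later in this patch normalizes component 'mfg-date' attributes against exactly this RFC 3339 date-time profile before storing them in a date-and-time leaf. A standalone sketch of such a check (hypothetical helper, not part of this patch):

    # Sketch: validate a string against the RFC 3339 date-time production
    # that the date-and-time type above profiles.
    import re

    DATE_AND_TIME_RE = re.compile(
        r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[\+\-]\d{2}:\d{2})$')

    def is_date_and_time(value : str) -> bool:
        # Accepts '2018-03-13T10:20:30Z' or '2018-03-13T10:20:30.5+02:00',
        # rejects a bare date such as '2018-03-13'.
        return DATE_AND_TIME_RE.match(value) is not None

    assert is_date_and_time('2018-03-13T10:20:30Z')
    assert not is_date_and_time('2018-03-13')
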
The canonical format for + date-and-time values with an unknown time zone (usually + referring to the notion of local time) uses the time-offset + -00:00."; + reference + "RFC 3339: Date and Time on the Internet: Timestamps + RFC 2579: Textual Conventions for SMIv2 + XSD-TYPES: XML Schema Part 2: Datatypes Second Edition"; + } + + typedef timeticks { + type uint32; + description + "The timeticks type represents a non-negative integer that + represents the time, modulo 2^32 (4294967296 decimal), in + hundredths of a second between two epochs. When a schema + node is defined that uses this type, the description of + the schema node identifies both of the reference epochs. + + In the value set and its semantics, this type is equivalent + to the TimeTicks type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef timestamp { + type yang:timeticks; + description + "The timestamp type represents the value of an associated + timeticks schema node at which a specific occurrence + happened. The specific occurrence must be defined in the + description of any schema node defined using this type. When + the specific occurrence occurred prior to the last time the + associated timeticks attribute was zero, then the timestamp + value is zero. Note that this requires all timestamp values + to be reset to zero when the value of the associated timeticks + attribute reaches 497+ days and wraps around to zero. + + The associated timeticks schema node must be specified + in the description of any schema node using this type. + + In the value set and its semantics, this type is equivalent + to the TimeStamp textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of generic address types ***/ + + typedef phys-address { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "Represents media- or physical-level addresses represented + as a sequence octets, each octet represented by two hexadecimal + numbers. Octets are separated by colons. The canonical + representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the PhysAddress textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + typedef mac-address { + type string { + pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'; + } + description + "The mac-address type represents an IEEE 802 MAC address. + The canonical representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the MacAddress textual convention of the SMIv2."; + reference + "IEEE 802: IEEE Standard for Local and Metropolitan Area + Networks: Overview and Architecture + RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of XML-specific types ***/ + + typedef xpath1.0 { + type string; + description + "This type represents an XPATH 1.0 expression. + + When a schema node is defined that uses this type, the + description of the schema node MUST specify the XPath + context in which the XPath expression is evaluated."; + reference + "XPATH: XML Path Language (XPath) Version 1.0"; + } + + /*** collection of string types ***/ + + typedef hex-string { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "A hexadecimal string with octets represented as hex digits + separated by colons. 
The canonical representation uses + lowercase characters."; + } + + typedef uuid { + type string { + pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'; + } + description + "A Universally Unique IDentifier in the string representation + defined in RFC 4122. The canonical representation uses + lowercase characters. + + The following is an example of a UUID in string representation: + f81d4fae-7dec-11d0-a765-00a0c91e6bf6 + "; + reference + "RFC 4122: A Universally Unique IDentifier (UUID) URN + Namespace"; + } + + typedef dotted-quad { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'; + } + description + "An unsigned 32-bit number expressed in the dotted-quad + notation, i.e., four octets written as decimal numbers + and separated with the '.' (full stop) character."; + } + } diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py new file mode 100644 index 0000000000000000000000000000000000000000..2282de557c1a80227c7d50e7c125ab4fe538bd28 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py @@ -0,0 +1,53 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
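The Hardware resource that follows serves one device's inventory tree at the RESTCONF path registered by this plugin's __init__.py (URL_PREFIX_DEVICE, further below in this patch). A client-side sketch; host, port, credentials, and the device UUID are placeholder assumptions:

    # Sketch: fetch the hardware inventory of one device through the NBI.
    import requests

    NBI_URL = 'https://127.0.0.1:80/restconf/data'  # placeholder endpoint
    DEVICE_UUID = '00000000-0000-0000-0000-000000000000'  # placeholder UUID

    reply = requests.get(
        '{:s}/device={:s}/ietf-network-hardware-inventory:network-hardware-inventory'.format(
            NBI_URL, DEVICE_UUID),
        auth=('admin', 'admin'), verify=False)  # placeholder credentials
    reply.raise_for_status()
    inventory = reply.json()  # JSON-encoded network-hardware-inventory tree
    print(inventory)
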
+
+import logging
+from flask import request
+from flask.json import jsonify
+from flask_restful import Resource
+from common.tools.context_queries.Device import get_device
+from context.client.ContextClient import ContextClient
+from ..tools.Authentication import HTTP_AUTH
+from ..tools.HttpStatusCodes import HTTP_OK, HTTP_SERVERERROR
+from .YangHandler import YangHandler
+
+LOGGER = logging.getLogger(__name__)
+
+class Hardware(Resource):
+    @HTTP_AUTH.login_required
+    def get(self, device_uuid : str):
+        LOGGER.debug('Device UUID: {:s}'.format(str(device_uuid)))
+        LOGGER.debug('Request: {:s}'.format(str(request)))
+
+        try:
+            context_client = ContextClient()
+            device = get_device(
+                context_client, device_uuid, rw_copy=False,
+                include_endpoints=False, include_config_rules=False, include_components=True
+            )
+            if device is None:
+                raise Exception('Device({:s}) not found in database'.format(str(device_uuid)))
+
+            yang_handler = YangHandler()
+            hardware_reply = yang_handler.compose(device)
+            yang_handler.destroy()
+
+            response = jsonify(hardware_reply)
+            response.status_code = HTTP_OK
+        except Exception as e: # pylint: disable=broad-except
+            MSG = 'Something went wrong Retrieving Hardware of Device({:s})'
+            LOGGER.exception(MSG.format(str(device_uuid)))
+            response = jsonify({'error': str(e)})
+            response.status_code = HTTP_SERVERERROR
+        return response
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/HardwareMultipleDevices.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/HardwareMultipleDevices.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1beff518bb3997fc04a79e78c3467b47bd51483
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/HardwareMultipleDevices.py
@@ -0,0 +1,50 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
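HardwareMultipleDevices, added next, exposes the same composition for every device known to the Context component; it answers at URL_PREFIX_HARDWARE (see __init__.py below) with a JSON list holding one composed inventory tree per device. A companion sketch to the single-device example above, with the same placeholder endpoint and credentials:

    # Sketch: list hardware inventories of all devices through the NBI.
    import requests

    NBI_URL = 'https://127.0.0.1:80/restconf/data'  # placeholder endpoint
    reply = requests.get(
        NBI_URL + '/ietf-network-hardware-inventory:network-hardware-inventory',
        auth=('admin', 'admin'), verify=False)  # placeholder credentials
    reply.raise_for_status()
    print(len(reply.json()), 'device inventories retrieved')
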
+
+import logging
+from flask import request
+from flask.json import jsonify
+from flask_restful import Resource
+from common.proto.context_pb2 import Empty
+from context.client.ContextClient import ContextClient
+from ..tools.Authentication import HTTP_AUTH
+from ..tools.HttpStatusCodes import HTTP_OK, HTTP_SERVERERROR
+from .YangHandler import YangHandler
+
+LOGGER = logging.getLogger(__name__)
+
+class HardwareMultipleDevices(Resource):
+    @HTTP_AUTH.login_required
+    def get(self):
+        LOGGER.debug('Request: {:s}'.format(str(request)))
+        try:
+            context_client = ContextClient()
+            list_devices = context_client.ListDevices(Empty())
+            LOGGER.debug('ListDevices reply: {:s}'.format(str(list_devices)))
+            hardware_list_reply = []
+            yang_handler = YangHandler()
+            for device in list_devices.devices:
+                hardware_reply = yang_handler.compose(device)
+                hardware_list_reply.append(hardware_reply)
+
+            yang_handler.destroy()
+            response = jsonify(hardware_list_reply)
+            response.status_code = HTTP_OK
+        except Exception as e: # pylint: disable=broad-except
+            MSG = 'Something went wrong Retrieving Hardware of Devices'
+            LOGGER.exception(MSG)
+            response = jsonify({'error': str(e)})
+            response.status_code = HTTP_SERVERERROR
+        return response
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..7662261e97b35958f036dc0e69913af7947b9403
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py
@@ -0,0 +1,131 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
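YangHandler, defined next, drives the libyang Python bindings: it builds a Context over the plugin's yang/ directory, loads the three modules, grows a data tree with create_data_path()/create_path(), and serializes it with print_mem('json'). A reduced sketch of the same call pattern (one module, one leaf; the search directory and the UUID key are placeholders), assuming the same bindings this patch relies on:

    # Sketch of the libyang call pattern used by YangHandler below.
    import libyang

    ctx = libyang.Context('/path/to/yang')  # placeholder search directory
    ctx.load_module('ietf-network-hardware-inventory').feature_enable_all()
    root = ctx.create_data_path(
        '/ietf-network-hardware-inventory:network-hardware-inventory')
    elems = root.create_path('network-elements')
    elem = elems.create_path(
        'network-element[uuid="00000000-0000-0000-0000-000000000000"]')
    elem.create_path('name', 'example-device')
    print(root.print_mem('json'))  # JSON-encoded data tree
    ctx.destroy()
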
+
+from common.proto.context_pb2 import Device
+from typing import Dict, Optional
+import datetime
+import json
+import logging
+import libyang
+import os
+import re
+
+LOGGER = logging.getLogger(__name__)
+YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
+YANG_MODULES = [
+    'iana-hardware',
+    'ietf-hardware',
+    'ietf-network-hardware-inventory'
+]
+
+class YangHandler:
+    def __init__(self) -> None:
+        self._yang_context = libyang.Context(YANG_DIR)
+        for yang_module_name in YANG_MODULES:
+            LOGGER.info('Loading module: {:s}'.format(str(yang_module_name)))
+            self._yang_context.load_module(yang_module_name).feature_enable_all()
+
+    def parse_to_dict(self, message : Dict) -> Dict:
+        yang_module = self._yang_context.get_module('ietf-network-hardware-inventory')
+        dnode : Optional[libyang.DNode] = yang_module.parse_data_dict(
+            message, validate_present=True, validate=True, strict=True
+        )
+        if dnode is None: raise Exception('Unable to parse Message({:s})'.format(str(message)))
+        message = dnode.print_dict()
+        dnode.free()
+        return message
+
+    @staticmethod
+    def convert_to_iso_date(date_str : str) -> Optional[str]:
+        # Return the value unchanged if it already matches the RFC 3339
+        # date-time profile; otherwise, try to upgrade a bare date.
+        date_str = date_str.strip('"')
+        pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[\+\-]\d{2}:\d{2})"
+        if re.match(pattern, date_str):
+            return date_str
+        try:
+            datetime_obj = datetime.datetime.strptime(date_str, "%Y-%m-%d")
+            return datetime_obj.isoformat() + "Z"
+        except ValueError:
+            return None
+
+    def compose(self, device : Device) -> Dict:
+        hardware = self._yang_context.create_data_path(
+            '/ietf-network-hardware-inventory:network-hardware-inventory')
+        network_elements = hardware.create_path('network-elements')
+
+        network_element = network_elements.create_path(
+            'network-element[uuid="{:s}"]'.format(device.device_id.device_uuid.uuid))
+        network_element.create_path('uuid', device.device_id.device_uuid.uuid)
+        network_element.create_path('name', device.name)
+        components = network_element.create_path('components')
+        physical_index = 1
+
+        for component in device.components:
+            attributes = component.attributes
+            component_new = components.create_path(
+                'component[uuid="{:s}"]'.format(component.component_uuid.uuid))
+            component_new.create_path('name', component.name)
+
+            # Normalize the TFS component type into an iana-hardware class name.
+            component_type = component.type
+            if component_type == "TRANSCEIVER":
+                component_type = "module"
+            if component_type == "FRU":
+                component_type = "stack"
+            component_type = 'iana-hardware:' + component_type.replace("_", "-").lower()
+            component_new.create_path('class', component_type)
+            physical_index += 1
+
+            component_new.create_path('description', attributes["description"].replace('"', ""))
+
+            # Only non-chassis components reference their parent component.
+            if "CHASSIS" not in component.type:
+                parent_component_references = component_new.create_path('parent-component-references')
+                parent = parent_component_references.create_path(
+                    'component-reference[index="{:d}"]'.format(physical_index))
+                for component_parent in device.components:
+                    if component.parent == component_parent.name:
+                        parent.create_path('uuid', component_parent.component_uuid.uuid)
+                        break
+
+            if attributes["mfg-date"] != "":
+                mfg_date = self.convert_to_iso_date(attributes["mfg-date"])
+                component_new.create_path('mfg-date', mfg_date)
+
+            component_new.create_path('hardware-rev', attributes["hardware-rev"])
+            component_new.create_path('software-rev', attributes["software-rev"])
+            component_new.create_path('firmware-rev', attributes["firmware-version"])
+            component_new.create_path('serial-num', attributes["serial-num"])
+            component_new.create_path('mfg-name', attributes["mfg-name"])
+
+            # 'removable' arrives as a string; map it onto the is-fru boolean.
+            if attributes["removable"]:
+                removable = attributes["removable"].lower()
+                if 'true' in removable:
+                    component_new.create_path('is-fru', True)
+                elif 'false' in removable:
+                    component_new.create_path('is-fru', False)
+
+            if attributes["id"]:
+                try:
+                    if "CHASSIS" in component.type:
+                        component_new.create_path('parent-rel-pos', 0)
+                    else:
+                        parent_rel_pos = int(attributes["id"].replace("\"", ""))
+                        component_new.create_path('parent-rel-pos', parent_rel_pos)
+                except ValueError:
+                    LOGGER.error('Invalid parent-rel-pos value for Component({:s})'.format(component.name))
+                    continue
+
+            component_new.create_path('uri', component.name)
+            component_new.create_path('uuid', component.component_uuid.uuid)
+
+            # Reverse lookup: register every component declaring this one as parent.
+            for child in device.components:
+                if component.name == child.parent:
+                    component_new.create_path('contained-child', child.component_uuid.uuid)
+
+        return json.loads(hardware.print_mem('json'))
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba774650e89e26609573a364be520c2d1bd6df84
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py
@@ -0,0 +1,24 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nbi.service.rest_server.nbi_plugins.ietf_hardware.Hardware import Hardware
+from nbi.service.rest_server.nbi_plugins.ietf_hardware.HardwareMultipleDevices import HardwareMultipleDevices
+from nbi.service.rest_server.RestServer import RestServer
+
+URL_PREFIX_DEVICE = "/restconf/data/device=<path:device_uuid>/ietf-network-hardware-inventory:network-hardware-inventory"
+URL_PREFIX_HARDWARE = "/restconf/data/ietf-network-hardware-inventory:network-hardware-inventory"
+
+def register_ietf_hardware(rest_server: RestServer):
+    rest_server.add_resource(Hardware, URL_PREFIX_DEVICE)
+    rest_server.add_resource(HardwareMultipleDevices, URL_PREFIX_HARDWARE)
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/iana-hardware@2018-03-13.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/iana-hardware@2018-03-13.yang
new file mode 100644
index 0000000000000000000000000000000000000000..5cd52648ff9b676dc04a98b5b85bd180f88f8a6e
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/iana-hardware@2018-03-13.yang
@@ -0,0 +1,189 @@
+module iana-hardware {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:iana-hardware";
+  prefix ianahw;
+
+  organization "IANA";
+  contact
+    "        Internet Assigned Numbers Authority
+
+     Postal: ICANN
+             12025 Waterfront Drive, Suite 300
+             Los Angeles, CA 90094-2536
+             United States of America
+
+     Tel:    +1 310 301 5800
+     E-Mail: iana@iana.org>";
+
+  description
+    "IANA-defined identities for hardware class.
+ + The latest revision of this YANG module can be obtained from + the IANA website. + + Requests for new values should be made to IANA via + email (iana@iana.org). + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + The initial version of this YANG module is part of RFC 8348; + see the RFC itself for full legal notices."; + reference + "https://www.iana.org/assignments/yang-parameters"; + + revision 2018-03-13 { + description + "Initial revision."; + reference + "RFC 8348: A YANG Data Model for Hardware Management"; + } + + /* + * Identities + */ + + identity hardware-class { + description + "This identity is the base for all hardware class + identifiers."; + } + + identity unknown { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is unknown + to the server."; + } + + identity chassis { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is an + overall container for networking equipment. Any class of + physical component, except a stack, may be contained within a + chassis; a chassis may only be contained within a stack."; + } + + identity backplane { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of device for aggregating and forwarding networking traffic, + such as a shared backplane in a modular ethernet switch. Note + that an implementation may model a backplane as a single + physical component, which is actually implemented as multiple + discrete physical components (within a chassis or stack)."; + } + + identity container { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is capable + of containing one or more removable physical entities, + possibly of different types. For example, each (empty or + full) slot in a chassis will be modeled as a container. Note + that all removable physical components should be modeled + within a container component, such as field-replaceable + modules, fans, or power supplies. Note that all known + containers should be modeled by the agent, including empty + containers."; + } + + identity power-supply { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is a + power-supplying component."; + } + + identity fan { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is a fan or + other heat-reduction component."; + } + + identity sensor { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of sensor, such as a temperature sensor within a router + chassis."; + } + + identity module { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of self-contained sub-system. 
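The YangHandler earlier in this patch resolves TFS component types to these identities: special cases are substituted first (for instance, 'TRANSCEIVER' becomes the 'module' class described here), then the type is lowercased, underscores become hyphens, and the 'iana-hardware:' prefix is prepended. A condensed sketch of that normalization (helper name hypothetical):

    # Sketch: TFS component type -> iana-hardware class name, mirroring
    # the normalization in YangHandler.compose() earlier in this patch.
    SPECIAL_CASES = {'TRANSCEIVER': 'module', 'FRU': 'stack'}

    def to_iana_class(tfs_type : str) -> str:
        tfs_type = SPECIAL_CASES.get(tfs_type, tfs_type)
        return 'iana-hardware:' + tfs_type.replace('_', '-').lower()

    assert to_iana_class('CHASSIS') == 'iana-hardware:chassis'
    assert to_iana_class('POWER_SUPPLY') == 'iana-hardware:power-supply'
    assert to_iana_class('TRANSCEIVER') == 'iana-hardware:module'
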
If a module component is + removable, then it should be modeled within a container + + component; otherwise, it should be modeled directly within + another physical component (e.g., a chassis or another + module)."; + } + + identity port { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of networking port capable of receiving and/or transmitting + networking traffic."; + } + + identity stack { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of super-container (possibly virtual) intended to group + together multiple chassis entities. A stack may be realized + by a virtual cable, a real interconnect cable attached to + multiple chassis, or multiple interconnect cables. A stack + should not be modeled within any other physical components, + but a stack may be contained within another stack. Only + chassis components should be contained within a stack."; + } + + identity cpu { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of central processing unit."; + } + + identity energy-object { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of energy object, i.e., it is a piece of equipment that is + part of or attached to a communications network that is + monitored, it is controlled, or it aids in the management of + another device for Energy Management."; + } + + identity battery { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of battery."; + } + + identity storage-drive { + base ianahw:hardware-class; + description + "This identity is applicable if the hardware class is some sort + of component with data storage capability as its main + functionality, e.g., hard disk drive (HDD), solid-state device + (SSD), solid-state hybrid drive (SSHD), object storage device + (OSD), or other."; + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-hardware@2018-03-13.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-hardware@2018-03-13.yang new file mode 100644 index 0000000000000000000000000000000000000000..4f984b616a61bb169f5ac132c3002ae1033aae7e --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-hardware@2018-03-13.yang @@ -0,0 +1,1194 @@ +module ietf-hardware { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-hardware"; + prefix hw; + + import ietf-inet-types { + prefix inet; + } + import ietf-yang-types { + prefix yang; + } + import iana-hardware { + prefix ianahw; + } + + organization + "IETF NETMOD (Network Modeling) Working Group"; + + contact + "WG Web: <https://datatracker.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + Editor: Andy Bierman + <mailto:andy@yumaworks.com> + + Editor: Martin Bjorklund + <mailto:mbj@tail-f.com> + + Editor: Jie Dong + <mailto:jie.dong@huawei.com> + + Editor: Dan Romascanu + <mailto:dromasca@gmail.com>"; + + description + "This module contains a collection of YANG definitions for + managing hardware. + + This data model is designed for the Network Management Datastore + Architecture (NMDA) defined in RFC 8342. + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8348; see + the RFC itself for full legal notices."; + + revision 2018-03-13 { + description + "Initial revision."; + reference + "RFC 8348: A YANG Data Model for Hardware Management"; + } + + /* + * Features + */ + + feature entity-mib { + description + "This feature indicates that the device implements + the ENTITY-MIB."; + reference + "RFC 6933: Entity MIB (Version 4)"; + } + + feature hardware-state { + description + "Indicates that ENTITY-STATE-MIB objects are supported"; + reference + "RFC 4268: Entity State MIB"; + } + + feature hardware-sensor { + description + "Indicates that ENTITY-SENSOR-MIB objects are supported"; + reference + "RFC 3433: Entity Sensor Management Information Base"; + } + + /* + * Typedefs + */ + + typedef admin-state { + type enumeration { + enum unknown { + value 1; + description + "The resource is unable to report administrative state."; + } + enum locked { + value 2; + description + "The resource is administratively prohibited from use."; + } + enum shutting-down { + value 3; + description + "The resource usage is administratively limited to current + instances of use."; + } + enum unlocked { + value 4; + description + "The resource is not administratively prohibited from + use."; + } + } + description + "Represents the various possible administrative states."; + reference + "RFC 4268: Entity State MIB - EntityAdminState"; + } + + typedef oper-state { + type enumeration { + enum unknown { + value 1; + description + "The resource is unable to report its operational state."; + } + enum disabled { + value 2; + description + "The resource is totally inoperable."; + } + enum enabled { + value 3; + + description + "The resource is partially or fully operable."; + } + enum testing { + value 4; + description + "The resource is currently being tested and cannot + therefore report whether or not it is operational."; + } + } + description + "Represents the possible values of operational states."; + reference + "RFC 4268: Entity State MIB - EntityOperState"; + } + + typedef usage-state { + type enumeration { + enum unknown { + value 1; + description + "The resource is unable to report usage state."; + } + enum idle { + value 2; + description + "The resource is servicing no users."; + } + enum active { + value 3; + description + "The resource is currently in use, and it has sufficient + spare capacity to provide for additional users."; + } + enum busy { + value 4; + description + "The resource is currently in use, but it currently has no + spare capacity to provide for additional users."; + } + } + description + "Represents the possible values of usage states."; + reference + "RFC 4268: Entity State MIB - EntityUsageState"; + } + + typedef alarm-state { + type bits { + bit unknown { + position 0; + description + "The resource is unable to report alarm state."; + } + bit under-repair { + position 1; + description + "The resource is currently being repaired, which, depending + on the implementation, may make the other values in this + bit string not meaningful."; + } + bit critical { + position 2; + description + "One or more critical alarms are active against the + resource."; + } + bit major { + 
position 3; + description + "One or more major alarms are active against the + resource."; + } + bit minor { + position 4; + description + "One or more minor alarms are active against the + resource."; + } + bit warning { + position 5; + description + "One or more warning alarms are active against the + resource."; + } + bit indeterminate { + position 6; + description + "One or more alarms of whose perceived severity cannot be + determined are active against this resource."; + } + } + description + "Represents the possible values of alarm states. An alarm is a + persistent indication of an error or warning condition. + + When no bits of this attribute are set, then no active alarms + are known against this component and it is not under repair."; + reference + "RFC 4268: Entity State MIB - EntityAlarmStatus"; + } + + typedef standby-state { + type enumeration { + enum unknown { + value 1; + description + "The resource is unable to report standby state."; + } + enum hot-standby { + value 2; + description + "The resource is not providing service, but it will be + immediately able to take over the role of the resource to + be backed up, without the need for initialization + activity, and will contain the same information as the + resource to be backed up."; + } + enum cold-standby { + value 3; + description + "The resource is to back up another resource, but it will + not be immediately able to take over the role of a + resource to be backed up and will require some + initialization activity."; + } + enum providing-service { + value 4; + description + "The resource is providing service."; + } + } + description + "Represents the possible values of standby states."; + reference + "RFC 4268: Entity State MIB - EntityStandbyStatus"; + } + + typedef sensor-value-type { + type enumeration { + enum other { + value 1; + description + "A measure other than those listed below."; + } + enum unknown { + value 2; + description + "An unknown measurement or arbitrary, relative numbers"; + } + enum volts-AC { + value 3; + description + "A measure of electric potential (alternating current)."; + } + enum volts-DC { + value 4; + description + "A measure of electric potential (direct current)."; + } + enum amperes { + value 5; + description + "A measure of electric current."; + } + enum watts { + value 6; + description + "A measure of power."; + } + enum hertz { + value 7; + description + "A measure of frequency."; + } + enum celsius { + value 8; + description + "A measure of temperature."; + } + enum percent-RH { + value 9; + description + "A measure of percent relative humidity."; + } + enum rpm { + value 10; + description + "A measure of shaft revolutions per minute."; + } + enum cmm { + value 11; + description + "A measure of cubic meters per minute (airflow)."; + } + enum truth-value { + value 12; + description + "Value is one of 1 (true) or 2 (false)"; + } + } + description + "A node using this data type represents the sensor measurement + data type associated with a physical sensor value. The actual + data units are determined by examining a node of this type + together with the associated sensor-value-scale node. + + A node of this type SHOULD be defined together with nodes of + type sensor-value-scale and type sensor-value-precision. 
+ These three types are used to identify the semantics of a node + of type sensor-value."; + reference + "RFC 3433: Entity Sensor Management Information Base - + EntitySensorDataType"; + } + + typedef sensor-value-scale { + type enumeration { + enum yocto { + value 1; + description + "Data scaling factor of 10^-24."; + } + enum zepto { + value 2; + description + "Data scaling factor of 10^-21."; + } + enum atto { + value 3; + description + "Data scaling factor of 10^-18."; + } + enum femto { + value 4; + description + "Data scaling factor of 10^-15."; + } + enum pico { + value 5; + description + "Data scaling factor of 10^-12."; + } + enum nano { + value 6; + description + "Data scaling factor of 10^-9."; + } + enum micro { + value 7; + description + "Data scaling factor of 10^-6."; + } + enum milli { + value 8; + description + "Data scaling factor of 10^-3."; + } + enum units { + value 9; + description + "Data scaling factor of 10^0."; + } + enum kilo { + value 10; + description + "Data scaling factor of 10^3."; + } + enum mega { + value 11; + description + "Data scaling factor of 10^6."; + } + enum giga { + value 12; + description + "Data scaling factor of 10^9."; + } + enum tera { + value 13; + description + "Data scaling factor of 10^12."; + } + enum peta { + value 14; + description + "Data scaling factor of 10^15."; + } + enum exa { + value 15; + description + "Data scaling factor of 10^18."; + } + enum zetta { + value 16; + description + "Data scaling factor of 10^21."; + } + enum yotta { + value 17; + description + "Data scaling factor of 10^24."; + } + } + description + "A node using this data type represents a data scaling factor, + represented with an International System of Units (SI) prefix. + The actual data units are determined by examining a node of + this type together with the associated sensor-value-type. + + A node of this type SHOULD be defined together with nodes of + type sensor-value-type and type sensor-value-precision. + Together, associated nodes of these three types are used to + identify the semantics of a node of type sensor-value."; + reference + "RFC 3433: Entity Sensor Management Information Base - + EntitySensorDataScale"; + } + + typedef sensor-value-precision { + type int8 { + range "-8 .. 9"; + } + description + "A node using this data type represents a sensor value + precision range. + + A node of this type SHOULD be defined together with nodes of + type sensor-value-type and type sensor-value-scale. Together, + associated nodes of these three types are used to identify the + semantics of a node of type sensor-value. + + If a node of this type contains a value in the range 1 to 9, + it represents the number of decimal places in the fractional + part of an associated sensor-value fixed-point number. + + If a node of this type contains a value in the range -8 to -1, + it represents the number of accurate digits in the associated + sensor-value fixed-point number. + + The value zero indicates the associated sensor-value node is + not a fixed-point number. + + Server implementers must choose a value for the associated + sensor-value-precision node so that the precision and accuracy + of the associated sensor-value node is correctly indicated. + + For example, a component representing a temperature sensor + that can measure 0 to 100 degrees C in 0.1 degree + increments, +/- 0.05 degrees, would have a + sensor-value-precision value of '1', a sensor-value-scale + value of 'units', and a sensor-value ranging from '0' to + '1000'. 
The sensor-value would be interpreted as + 'degrees C * 10'."; + reference + "RFC 3433: Entity Sensor Management Information Base - + EntitySensorPrecision"; + } + + typedef sensor-value { + type int32 { + range "-1000000000 .. 1000000000"; + } + description + "A node using this data type represents a sensor value. + + A node of this type SHOULD be defined together with nodes of + type sensor-value-type, type sensor-value-scale, and + type sensor-value-precision. Together, associated nodes of + those three types are used to identify the semantics of a node + of this data type. + + The semantics of a node using this data type are determined by + the value of the associated sensor-value-type node. + + If the associated sensor-value-type node is equal to 'voltsAC', + 'voltsDC', 'amperes', 'watts', 'hertz', 'celsius', or 'cmm', + then a node of this type MUST contain a fixed-point number + ranging from -999,999,999 to +999,999,999. The value + -1000000000 indicates an underflow error. The value + +1000000000 indicates an overflow error. The + sensor-value-precision indicates how many fractional digits + are represented in the associated sensor-value node. + + If the associated sensor-value-type node is equal to + 'percentRH', then a node of this type MUST contain a number + ranging from 0 to 100. + + If the associated sensor-value-type node is equal to 'rpm', + then a node of this type MUST contain a number ranging from + -999,999,999 to +999,999,999. + + If the associated sensor-value-type node is equal to + 'truth-value', then a node of this type MUST contain either the + value 1 (true) or the value 2 (false). + + If the associated sensor-value-type node is equal to 'other' or + 'unknown', then a node of this type MUST contain a number + ranging from -1000000000 to 1000000000."; + reference + "RFC 3433: Entity Sensor Management Information Base - + EntitySensorValue"; + } + + typedef sensor-status { + type enumeration { + enum ok { + value 1; + description + "Indicates that the server can obtain the sensor value."; + } + enum unavailable { + value 2; + description + "Indicates that the server presently cannot obtain the + sensor value."; + } + enum nonoperational { + value 3; + description + "Indicates that the server believes the sensor is broken. + The sensor could have a hard failure (disconnected wire) + or a soft failure such as out-of-range, jittery, or wildly + fluctuating readings."; + } + } + description + "A node using this data type represents the operational status + of a physical sensor."; + reference + "RFC 3433: Entity Sensor Management Information Base - + EntitySensorStatus"; + } + + /* + * Data nodes + */ + + container hardware { + description + "Data nodes representing components. + + If the server supports configuration of hardware components, + then this data model is instantiated in the configuration + datastores supported by the server. The leaf-list 'datastore' + for the module 'ietf-hardware' in the YANG library provides + this information."; + + leaf last-change { + type yang:date-and-time; + config false; + description + "The time the '/hardware/component' list changed in the + operational state."; + } + + list component { + key name; + description + "List of components. + + When the server detects a new hardware component, it + initializes a list entry in the operational state. 
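For orientation, a small sketch of how one such operational list entry could look when encoded as RFC 7951 JSON, built here in Python; all values, and the choice of leaves, are invented for illustration (the leaves themselves are defined further below):

    import json

    # Hypothetical detected component; 'class' is an identityref and is
    # therefore module-qualified in RFC 7951 JSON ('iana-hardware:port').
    component = {
        "name": "slot-1/port-3",
        "class": "iana-hardware:port",
        "parent": "slot-1",
        "parent-rel-pos": 3,
        "serial-num": "SN-0001",
    }
    doc = {"ietf-hardware:hardware": {"component": [component]}}
    print(json.dumps(doc, indent=2))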
+ + If the server does not support configuration of hardware + components, list entries in the operational state are + initialized with values for all nodes as detected by the + implementation. + + Otherwise, this procedure is followed: + + 1. If there is an entry in the '/hardware/component' list + in the intended configuration with values for the nodes + 'class', 'parent', and 'parent-rel-pos' that are equal + to the detected values, then the list entry in the + operational state is initialized with the configured + values, including the 'name'. + + 2. Otherwise (i.e., there is no matching configuration + entry), the list entry in the operational state is + initialized with values for all nodes as detected by + the implementation. + + If the '/hardware/component' list in the intended + configuration is modified, then the system MUST behave as if + it re-initializes itself and follow the procedure in (1)."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalEntry"; + + leaf name { + type string; + description + "The name assigned to this component. + + This name is not required to be the same as + entPhysicalName."; + } + + leaf class { + type identityref { + base ianahw:hardware-class; + } + mandatory true; + description + "An indication of the general hardware type of the + component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalClass"; + } + + leaf physical-index { + if-feature entity-mib; + type int32 { + range "1..2147483647"; + } + config false; + description + "The entPhysicalIndex for the entPhysicalEntry represented + by this list entry."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalIndex"; + } + + leaf description { + type string; + config false; + description + "A textual description of the component. This node should + contain a string that identifies the manufacturer's name + for the component and should be set to a distinct value + for each version or model of the component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalDescr"; + } + + leaf parent { + type leafref { + path "../../component/name"; + require-instance false; + } + description + "The name of the component that physically contains this + component. + + If this leaf is not instantiated, it indicates that this + component is not contained in any other component. + + In the event that a physical component is contained by + more than one physical component (e.g., double-wide + modules), this node contains the name of one of these + components. An implementation MUST use the same name + every time this node is instantiated."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalContainedIn"; + } + + leaf parent-rel-pos { + type int32 { + range "0 .. 2147483647"; + } + description + "An indication of the relative position of this child + component among all its sibling components. Sibling + components are defined as components that: + + o share the same value of the 'parent' node and + + o share a common base identity for the 'class' node. + + Note that the last rule gives implementations flexibility + in how components are numbered. 
For example, some + implementations might have a single number series for all + components derived from 'ianahw:port', while some others + might have different number series for different + components with identities derived from 'ianahw:port' (for + example, one for registered jack 45 (RJ45) and one for + small form-factor pluggable (SFP))."; + + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalParentRelPos"; + } + + leaf-list contains-child { + type leafref { + path "../../component/name"; + } + config false; + description + "The name of the contained component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalChildIndex"; + } + + leaf hardware-rev { + type string; + config false; + description + "The vendor-specific hardware revision string for the + component. The preferred value is the hardware revision + identifier actually printed on the component itself (if + present)."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalHardwareRev"; + } + + leaf firmware-rev { + type string; + config false; + description + "The vendor-specific firmware revision string for the + component."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalFirmwareRev"; + } + + leaf software-rev { + type string; + config false; + + description + "The vendor-specific software revision string for the + component."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalSoftwareRev"; + } + + leaf serial-num { + type string; + config false; + description + "The vendor-specific serial number string for the + component. The preferred value is the serial number + string actually printed on the component itself (if + present)."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalSerialNum"; + } + + leaf mfg-name { + type string; + config false; + description + "The name of the manufacturer of this physical component. + The preferred value is the manufacturer name string + actually printed on the component itself (if present). + + Note that comparisons between instances of the + 'model-name', 'firmware-rev', 'software-rev', and + 'serial-num' nodes are only meaningful amongst components + with the same value of 'mfg-name'. + + If the manufacturer name string associated with the + physical component is unknown to the server, then this + node is not instantiated."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgName"; + } + + leaf model-name { + type string; + config false; + description + "The vendor-specific model name identifier string + associated with this physical component. The preferred + value is the customer-visible part number, which may be + printed on the component itself. + If the model name string associated with the physical + component is unknown to the server, then this node is not + instantiated."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalModelName"; + } + + leaf alias { + type string; + description + "An 'alias' name for the component, as specified by a + network manager, that provides a non-volatile 'handle' for + the component. + + If no configured value exists, the server MAY set the + value of this node to a locally unique value in the + operational state. + + A server implementation MAY map this leaf to the + entPhysicalAlias MIB object. Such an implementation needs + to use some mechanism to handle the differences in size + and characters allowed between this leaf and + entPhysicalAlias. 
The definition of such a mechanism is + outside the scope of this document."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalAlias"; + } + + leaf asset-id { + type string; + description + "This node is a user-assigned asset tracking identifier for + the component. + + A server implementation MAY map this leaf to the + entPhysicalAssetID MIB object. Such an implementation + needs to use some mechanism to handle the differences in + size and characters allowed between this leaf and + entPhysicalAssetID. The definition of such a mechanism is + outside the scope of this document."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalAssetID"; + } + + leaf is-fru { + type boolean; + config false; + + description + "This node indicates whether or not this component is + considered a 'field-replaceable unit' by the vendor. If + this node contains the value 'true', then this component + identifies a field-replaceable unit. For all components + that are permanently contained within a field-replaceable + unit, the value 'false' should be returned for this + node."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalIsFRU"; + } + + leaf mfg-date { + type yang:date-and-time; + config false; + description + "The date of manufacturing of the managed component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgDate"; + } + + leaf-list uri { + type inet:uri; + description + "This node contains identification information about the + component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalUris"; + } + + leaf uuid { + type yang:uuid; + config false; + description + "A Universally Unique Identifier of the component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalUUID"; + } + + container state { + if-feature hardware-state; + description + "State-related nodes"; + reference + "RFC 4268: Entity State MIB"; + + leaf state-last-changed { + type yang:date-and-time; + config false; + description + "The date and time when the value of any of the + admin-state, oper-state, usage-state, alarm-state, or + standby-state changed for this component. + + If there has been no change since the last + re-initialization of the local system, this node + contains the date and time of local system + initialization. If there has been no change since the + component was added to the local system, this node + contains the date and time of the insertion."; + reference + "RFC 4268: Entity State MIB - entStateLastChanged"; + } + + leaf admin-state { + type admin-state; + description + "The administrative state for this component. + + This node refers to a component's administrative + permission to service both other components within its + containment hierarchy as well other users of its + services defined by means outside the scope of this + module. + + Some components exhibit only a subset of the remaining + administrative state values. Some components cannot be + locked; hence, this node exhibits only the 'unlocked' + state. Other components cannot be shut down gracefully; + hence, this node does not exhibit the 'shutting-down' + state."; + reference + "RFC 4268: Entity State MIB - entStateAdmin"; + } + + leaf oper-state { + type oper-state; + config false; + description + "The operational state for this component. + + Note that this node does not follow the administrative + state. An administrative state of 'down' does not + predict an operational state of 'disabled'. 
+ + Note that some implementations may not be able to + accurately report oper-state while the admin-state node + has a value other than 'unlocked'. In these cases, this + node MUST have a value of 'unknown'."; + reference + "RFC 4268: Entity State MIB - entStateOper"; + } + + leaf usage-state { + type usage-state; + config false; + description + "The usage state for this component. + + This node refers to a component's ability to service + more components in a containment hierarchy. + + Some components will exhibit only a subset of the usage + state values. Components that are unable to ever + service any components within a containment hierarchy + will always have a usage state of 'busy'. In some + cases, a component will be able to support only one + other component within its containment hierarchy and + will therefore only exhibit values of 'idle' and + 'busy'."; + reference + "RFC 4268: Entity State MIB - entStateUsage"; + } + + leaf alarm-state { + type alarm-state; + config false; + description + "The alarm state for this component. It does not + include the alarms raised on child components within its + containment hierarchy."; + reference + "RFC 4268: Entity State MIB - entStateAlarm"; + } + + leaf standby-state { + type standby-state; + config false; + description + "The standby state for this component. + + Some components will exhibit only a subset of the + remaining standby state values. If this component + cannot operate in a standby role, the value of this node + will always be 'providing-service'."; + reference + "RFC 4268: Entity State MIB - entStateStandby"; + } + } + + container sensor-data { + when 'derived-from-or-self(../class, + "ianahw:sensor")' { + description + "Sensor data nodes present for any component of type + 'sensor'"; + } + if-feature hardware-sensor; + config false; + + description + "Sensor-related nodes."; + reference + "RFC 3433: Entity Sensor Management Information Base"; + + leaf value { + type sensor-value; + description + "The most recent measurement obtained by the server + for this sensor. 
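Since 'value' is only meaningful together with 'value-type', 'value-scale', and 'value-precision', a minimal Python sketch of the fixed-point decoding rules follows; the helper name and the scale subset are assumptions, not part of the model:

    # Invented helper illustrating the sensor-data leaves: 'value' is a
    # fixed-point integer qualified by 'value-scale' and 'value-precision'.
    SCALE = {"milli": -3, "units": 0, "kilo": 3}  # subset of sensor-value-scale

    def render(value: int, scale: str, precision: int, units: str) -> str:
        if abs(value) == 1000000000:            # RFC 3433 under/overflow marks
            return "overflow" if value > 0 else "underflow"
        magnitude = value * 10 ** SCALE[scale]
        if precision > 0:                       # decimal places of the fixed point
            magnitude /= 10 ** precision
        return f"{magnitude} {units}"

    # The temperature example above: precision 1, scale 'units',
    # raw value 215 reads as 21.5 degrees C.
    print(render(215, "units", 1, "degrees C"))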
+ + A client that periodically fetches this node should also + fetch the nodes 'value-type', 'value-scale', and + 'value-precision', since they may change when the value + is changed."; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorValue"; + } + + leaf value-type { + type sensor-value-type; + description + "The type of data units associated with the + sensor value"; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorType"; + } + leaf value-scale { + type sensor-value-scale; + description + "The (power of 10) scaling factor associated + with the sensor value"; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorScale"; + } + + leaf value-precision { + type sensor-value-precision; + description + "The number of decimal places of precision + associated with the sensor value"; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorPrecision"; + } + + leaf oper-status { + type sensor-status; + description + "The operational status of the sensor."; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorOperStatus"; + } + + leaf units-display { + type string; + description + "A textual description of the data units that should be + used in the display of the sensor value."; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorUnitsDisplay"; + } + + leaf value-timestamp { + type yang:date-and-time; + description + "The time the status and/or value of this sensor was last + obtained by the server."; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorValueTimeStamp"; + } + leaf value-update-rate { + type uint32; + units "milliseconds"; + description + "An indication of the frequency that the server updates + the associated 'value' node, represented in + milliseconds. 
The value zero indicates: + + - the sensor value is updated on demand (e.g., + when polled by the server for a get-request), + + - the sensor value is updated when the sensor + value changes (event-driven), or + + - the server does not know the update rate."; + reference + "RFC 3433: Entity Sensor Management Information Base - + entPhySensorValueUpdateRate"; + } + } + } + } + + /* + * Notifications + */ + + notification hardware-state-change { + description + "A hardware-state-change notification is generated when the + value of /hardware/last-change changes in the operational + state."; + reference + "RFC 6933: Entity MIB (Version 4) - entConfigChange"; + } + + notification hardware-state-oper-enabled { + if-feature hardware-state; + description + "A hardware-state-oper-enabled notification signifies that a + component has transitioned into the 'enabled' state."; + + leaf name { + type leafref { + path "/hardware/component/name"; + } + + description + "The name of the component that has transitioned into the + 'enabled' state."; + } + leaf admin-state { + type leafref { + path "/hardware/component/state/admin-state"; + } + description + "The administrative state for the component."; + } + leaf alarm-state { + type leafref { + path "/hardware/component/state/alarm-state"; + } + description + "The alarm state for the component."; + } + reference + "RFC 4268: Entity State MIB - entStateOperEnabled"; + } + + notification hardware-state-oper-disabled { + if-feature hardware-state; + description + "A hardware-state-oper-disabled notification signifies that a + component has transitioned into the 'disabled' state."; + + leaf name { + type leafref { + path "/hardware/component/name"; + } + description + "The name of the component that has transitioned into the + 'disabled' state."; + } + leaf admin-state { + type leafref { + path "/hardware/component/state/admin-state"; + } + description + "The administrative state for the component."; + } + leaf alarm-state { + type leafref { + path "/hardware/component/state/alarm-state"; + } + + description + "The alarm state for the component."; + } + reference + "RFC 4268: Entity State MIB - entStateOperDisabled"; + } + +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-inet-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-inet-types@2013-07-15.yang new file mode 100644 index 0000000000000000000000000000000000000000..eacefb6363de1beb543567a0fa705571b7dc57a2 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-inet-types@2013-07-15.yang @@ -0,0 +1,458 @@ +module ietf-inet-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types"; + prefix "inet"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: <http://tools.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + WG Chair: David Kessens + <mailto:david.kessens@nsn.com> + + WG Chair: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de> + + Editor: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de>"; + + description + "This module contains a collection of generally useful derived + YANG data types for Internet addresses and related things. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - ip-address-no-zone + - ipv4-address-no-zone + - ipv6-address-no-zone"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of types related to protocol fields ***/ + + typedef ip-version { + type enumeration { + enum unknown { + value "0"; + description + "An unknown or unspecified version of the Internet + protocol."; + } + enum ipv4 { + value "1"; + description + "The IPv4 protocol as defined in RFC 791."; + } + enum ipv6 { + value "2"; + description + "The IPv6 protocol as defined in RFC 2460."; + } + } + description + "This value represents the version of the IP protocol. + + In the value set and its semantics, this type is equivalent + to the InetVersion textual convention of the SMIv2."; + reference + "RFC 791: Internet Protocol + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + typedef dscp { + type uint8 { + range "0..63"; + } + description + "The dscp type represents a Differentiated Services Code Point + that may be used for marking packets in a traffic stream. + In the value set and its semantics, this type is equivalent + to the Dscp textual convention of the SMIv2."; + reference + "RFC 3289: Management Information Base for the Differentiated + Services Architecture + RFC 2474: Definition of the Differentiated Services Field + (DS Field) in the IPv4 and IPv6 Headers + RFC 2780: IANA Allocation Guidelines For Values In + the Internet Protocol and Related Headers"; + } + + typedef ipv6-flow-label { + type uint32 { + range "0..1048575"; + } + description + "The ipv6-flow-label type represents the flow identifier or Flow + Label in an IPv6 packet header that may be used to + discriminate traffic flows. + + In the value set and its semantics, this type is equivalent + to the IPv6FlowLabel textual convention of the SMIv2."; + reference + "RFC 3595: Textual Conventions for IPv6 Flow Label + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification"; + } + + typedef port-number { + type uint16 { + range "0..65535"; + } + description + "The port-number type represents a 16-bit port number of an + Internet transport-layer protocol such as UDP, TCP, DCCP, or + SCTP. Port numbers are assigned by IANA. A current list of + all assignments is available from <http://www.iana.org/>. + + Note that the port number value zero is reserved by IANA. In + situations where the value zero does not make sense, it can + be excluded by subtyping the port-number type. 
+ In the value set and its semantics, this type is equivalent + to the InetPortNumber textual convention of the SMIv2."; + reference + "RFC 768: User Datagram Protocol + RFC 793: Transmission Control Protocol + RFC 4960: Stream Control Transmission Protocol + RFC 4340: Datagram Congestion Control Protocol (DCCP) + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + /*** collection of types related to autonomous systems ***/ + + typedef as-number { + type uint32; + description + "The as-number type represents autonomous system numbers + which identify an Autonomous System (AS). An AS is a set + of routers under a single technical administration, using + an interior gateway protocol and common metrics to route + packets within the AS, and using an exterior gateway + protocol to route packets to other ASes. IANA maintains + the AS number space and has delegated large parts to the + regional registries. + + Autonomous system numbers were originally limited to 16 + bits. BGP extensions have enlarged the autonomous system + number space to 32 bits. This type therefore uses an uint32 + base type without a range restriction in order to support + a larger autonomous system number space. + + In the value set and its semantics, this type is equivalent + to the InetAutonomousSystemNumber textual convention of + the SMIv2."; + reference + "RFC 1930: Guidelines for creation, selection, and registration + of an Autonomous System (AS) + RFC 4271: A Border Gateway Protocol 4 (BGP-4) + RFC 4001: Textual Conventions for Internet Network Addresses + RFC 6793: BGP Support for Four-Octet Autonomous System (AS) + Number Space"; + } + + /*** collection of types related to IP addresses and hostnames ***/ + + typedef ip-address { + type union { + type inet:ipv4-address; + type inet:ipv6-address; + } + description + "The ip-address type represents an IP address and is IP + version neutral. The format of the textual representation + implies the IP version. This type supports scoped addresses + by allowing zone identifiers in the address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '(%[\p{N}\p{L}]+)?'; + } + description + "The ipv4-address type represents an IPv4 address in + dotted-quad notation. The IPv4 address may include a zone + index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format for the zone index is the numerical + format"; + } + + typedef ipv6-address { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(%[\p{N}\p{L}]+)?'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(%.+)?'; + } + description + "The ipv6-address type represents an IPv6 address in full, + mixed, shortened, and shortened-mixed notation. The IPv6 + address may include a zone index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. 
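As a small illustration of the textual convention (address and zone are invented), the '%' separator can be split off mechanically:

    # An empty zone component means the default zone of the device applies.
    addr, _, zone = "192.0.2.1%eth0".partition("%")
    print(addr, zone or "<default zone>")   # -> 192.0.2.1 eth0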
For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format of IPv6 addresses uses the textual + representation defined in Section 4 of RFC 5952. The + canonical format for the zone index is the numerical + format as described in Section 11.2 of RFC 4007."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-address-no-zone { + type union { + type inet:ipv4-address-no-zone; + type inet:ipv6-address-no-zone; + } + description + "The ip-address-no-zone type represents an IP address and is + IP version neutral. The format of the textual representation + implies the IP version. This type does not support scoped + addresses since it does not allow zone identifiers in the + address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address-no-zone { + type inet:ipv4-address { + pattern '[0-9\.]*'; + } + description + "An IPv4 address without a zone index. This type, derived from + ipv4-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + } + + typedef ipv6-address-no-zone { + type inet:ipv6-address { + pattern '[0-9a-fA-F:\.]*'; + } + description + "An IPv6 address without a zone index. This type, derived from + ipv6-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-prefix { + type union { + type inet:ipv4-prefix; + type inet:ipv6-prefix; + } + description + "The ip-prefix type represents an IP prefix and is IP + version neutral. The format of the textual representations + implies the IP version."; + } + + typedef ipv4-prefix { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '/(([0-9])|([1-2][0-9])|(3[0-2]))'; + } + description + "The ipv4-prefix type represents an IPv4 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 32. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The canonical format of an IPv4 prefix has all bits of + the IPv4 address set to zero that are not part of the + IPv4 prefix."; + } + + typedef ipv6-prefix { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(/.+)'; + } + + description + "The ipv6-prefix type represents an IPv6 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 128. 
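A short Python sketch (example prefixes invented) of the canonical-format rule for these prefix types, using the standard-library ipaddress module, which zeroes the host bits when strict checking is disabled:

    import ipaddress

    # strict=False masks out the bits that are not part of the prefix,
    # yielding the canonical forms described here.
    print(ipaddress.ip_network("192.0.2.97/24", strict=False))          # 192.0.2.0/24
    print(ipaddress.ip_network("2001:db8::dead:beef/64", strict=False)) # 2001:db8::/64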
+ + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The IPv6 address should have all bits that do not belong + to the prefix set to zero. + + The canonical format of an IPv6 prefix has all bits of + the IPv6 address set to zero that are not part of the + IPv6 prefix. Furthermore, the IPv6 address is represented + as defined in Section 4 of RFC 5952."; + reference + "RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + /*** collection of domain name and URI types ***/ + + typedef domain-name { + type string { + pattern + '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*' + + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)' + + '|\.'; + length "1..253"; + } + description + "The domain-name type represents a DNS domain name. The + name SHOULD be fully qualified whenever possible. + + Internet domain names are only loosely specified. Section + 3.5 of RFC 1034 recommends a syntax (modified in Section + 2.1 of RFC 1123). The pattern above is intended to allow + for current practice in domain name use, and some possible + future expansion. It is designed to hold various types of + domain names, including names used for A or AAAA records + (host names) and other records, such as SRV records. Note + that Internet host names have a stricter syntax (described + in RFC 952) than the DNS recommendations in RFCs 1034 and + 1123, and that systems that want to store host names in + schema nodes using the domain-name type are recommended to + adhere to this stricter standard to ensure interoperability. + + The encoding of DNS names in the DNS protocol is limited + to 255 characters. Since the encoding consists of labels + prefixed by a length bytes and there is a trailing NULL + byte, only 253 characters can appear in the textual dotted + notation. + + The description clause of schema nodes using the domain-name + type MUST describe when and how these names are resolved to + IP addresses. Note that the resolution of a domain-name value + may require to query multiple DNS records (e.g., A for IPv4 + and AAAA for IPv6). The order of the resolution process and + which DNS record takes precedence can either be defined + explicitly or may depend on the configuration of the + resolver. + + Domain-name values use the US-ASCII encoding. Their canonical + format uses lowercase US-ASCII characters. Internationalized + domain names MUST be A-labels as per RFC 5890."; + reference + "RFC 952: DoD Internet Host Table Specification + RFC 1034: Domain Names - Concepts and Facilities + RFC 1123: Requirements for Internet Hosts -- Application + and Support + RFC 2782: A DNS RR for specifying the location of services + (DNS SRV) + RFC 5890: Internationalized Domain Names in Applications + (IDNA): Definitions and Document Framework"; + } + + typedef host { + type union { + type inet:ip-address; + type inet:domain-name; + } + description + "The host type represents either an IP address or a DNS + domain name."; + } + + typedef uri { + type string; + description + "The uri type represents a Uniform Resource Identifier + (URI) as defined by STD 66. + + Objects using the uri type MUST be in US-ASCII encoding, + and MUST be normalized as described by RFC 3986 Sections + 6.2.1, 6.2.2.1, and 6.2.2.2. 
All unnecessary + percent-encoding is removed, and all case-insensitive + characters are set to lowercase except for hexadecimal + digits, which are normalized to uppercase as described in + Section 6.2.2.1. + + The purpose of this normalization is to help provide + unique URIs. Note that this normalization is not + sufficient to provide uniqueness. Two URIs that are + textually distinct after this normalization may still be + equivalent. + + Objects using the uri type may restrict the schemes that + they permit. For example, 'data:' and 'urn:' schemes + might not be appropriate. + + A zero-length URI is not a valid URI. This can be used to + express 'URI absent' where required. + + In the value set and its semantics, this type is equivalent + to the Uri SMIv2 textual convention defined in RFC 5017."; + reference + "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax + RFC 3305: Report from the Joint W3C/IETF URI Planning Interest + Group: Uniform Resource Identifiers (URIs), URLs, + and Uniform Resource Names (URNs): Clarifications + and Recommendations + RFC 5017: MIB Textual Conventions for Uniform Resource + Identifiers (URIs)"; + } + +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-network-hardware-inventory@2023-03-09.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-network-hardware-inventory@2023-03-09.yang new file mode 100644 index 0000000000000000000000000000000000000000..e074e3005e97f9657f7ef23a39741d3ce4b912b8 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-network-hardware-inventory@2023-03-09.yang @@ -0,0 +1,604 @@ +module ietf-network-hardware-inventory { + yang-version 1.1; + namespace + "urn:ietf:params:xml:ns:yang:ietf-network-hardware-inventory"; + prefix nhi; + + import ietf-yang-types { + prefix yang; + reference + "RFC6991: Common YANG Data Types."; + } + + import iana-hardware { + prefix ianahw; + reference + "https://www.iana.org/assignments/yang-parameters"; + } + + import ietf-inet-types { + prefix inet; + reference + "RFC6991: Common YANG Data Types."; + } + + organization + "IETF CCAMP Working Group"; + contact + "WG Web: <https://datatracker.ietf.org/wg/ccamp/> + WG List: <mailto:ccamp@ietf.org> + + Editor: Chaode Yu + <yuchaode@huawei.com> + + Editor: Italo Busi + <italo.busi@huawei.com> + + Editor: Aihua Guo + <aihuaguo.ietf@gmail.com> + + Editor: Sergio Belotti + <sergio.belotti@nokia.com> + + Editor: Jean-Francois Bouquier + <jeff.bouquier@vodafone.com> + + Editor: Fabio Peruzzini + <fabio.peruzzini@telecomitalia.it>"; + + description + "This module defines a model for retrieving network hardware + inventory. + + The model fully conforms to the Network Management + Datastore Architecture (NMDA). + Copyright (c) 2022 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices. 
+
+     The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL
+     NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED',
+     'MAY', and 'OPTIONAL' in this document are to be interpreted as
+     described in BCP 14 (RFC 2119) (RFC 8174) when, and only when,
+     they appear in all capitals, as shown here.";
+
+  // RFC Ed.: replace XXXX with actual RFC number and remove this
+  // note.
+  // RFC Ed.: update the date below with the date of RFC publication
+  // and remove this note.
+
+  revision 2023-03-09 {
+    description
+      "Initial version";
+    reference
+      "RFC XXXX: A YANG Data Model for Network Hardware Inventory.";
+    //RFC Editor: replace XXXX with actual RFC number, update date
+    //information and remove this note
+  }
+
+  container network-hardware-inventory {
+    config false;
+    description
+      "The top-level container for the network inventory
+       information.";
+    uses equipment-rooms-grouping;
+    uses network-elements-grouping;
+  }
+
+  grouping common-entity-attributes {
+    description
+      "A set of attributes which are common to all the entities
+       (e.g., component, equipment room) defined in this module.";
+    leaf uuid {
+      type yang:uuid;
+      description
+        "Uniquely identifies an entity (e.g., component).";
+    }
+    leaf name {
+      type string;
+      description
+        "A name for an entity (e.g., component), as specified by
+         a network manager, that provides a non-volatile 'handle'
+         for the entity and that can be modified anytime during the
+         entity lifetime.
+
+         If no configured value exists, the server MAY set the value
+         of this node to a locally unique value in the operational
+         state.";
+    }
+    leaf description {
+      type string;
+      description "A textual description of the inventory object.";
+    }
+    leaf alias {
+      type string;
+      description
+        "An alias name for the inventory object. This alias name can
+         be specified by the network manager.";
+    }
+  }
+
+  grouping network-elements-grouping {
+    description
+      "The attributes of the network elements.";
+    container network-elements {
+      description
+        "The container for the list of network elements.";
+      list network-element {
+        key uuid;
+        description
+          "The list of network elements within the network.";
+        uses common-entity-attributes;
+        container ne-location {
+          description
+            "The location information of this network element.";
+          leaf-list equipment-room-name {
+            type leafref {
+              path "/nhi:network-hardware-inventory/"
+                 + "nhi:equipment-rooms/nhi:equipment-room/nhi:name";
+            }
+            description
+              "Names of equipment rooms where the NE is located.
+               Please note that an NE could be located in several
+               equipment rooms.";
+          }
+        }
+        uses ne-specific-info-grouping;
+        uses components-grouping;
+      }
+    }
+  }
+
+  grouping ne-specific-info-grouping {
+    description
+      "Attributes applicable to network elements.";
+    leaf hardware-rev {
+      type string;
+      description
+        "The vendor-specific hardware revision string for the NE.";
+    }
+    leaf software-rev {
+      type string;
+      description
+        "The vendor-specific software revision string for the NE.";
+    }
+    leaf mfg-name {
+      type string;
+      description "The name of the manufacturer of this NE.";
+    }
+    leaf mfg-date {
+      type yang:date-and-time;
+      description "The date of manufacturing of the NE.";
+    }
+    leaf part-number {
+      type string;
+      description
+        "The vendor-specific model name identifier string associated
+         with this NE. The preferred value is the customer-visible
+         part number, which may be printed on the NE itself.";
+    }
+    leaf serial-number {
+      type string;
+      description
+        "The vendor-specific serial number string for the NE.";
+    }
+    leaf product-name {
+      type string;
+      description
+        "Indicates the vendor-specific device type information.";
+    }
+  }
+
+  grouping equipment-rooms-grouping {
+    description
+      "The attributes of the equipment rooms.";
+    container equipment-rooms {
+      description
+        "The container for the list of equipment rooms.";
+      list equipment-room {
+        key uuid;
+        description
+          "The list of equipment rooms within the network.";
+        uses common-entity-attributes;
+        leaf location {
+          type string;
+          description
+            "Compared with the location information of the other
+             inventory objects, a GIS address is preferred for the
+             equipment room.";
+        }
+        container racks {
+          description
+            "Top-level container for the list of racks.";
+          list rack {
+            key uuid;
+            description
+              "The list of racks within an equipment room.";
+            uses common-entity-attributes;
+            uses rack-specific-info-grouping;
+            list contained-chassis {
+              key "ne-ref component-ref";
+              description
+                "The list of chassis within a rack.";
+              leaf ne-ref {
+                type leafref {
+                  path "/nhi:network-hardware-inventory"
+                     + "/nhi:network-elements/nhi:network-element"
+                     + "/nhi:uuid";
+                }
+                description
+                  "The reference to the network element containing
+                   the chassis component.";
+              }
+              leaf component-ref {
+                type leafref {
+                  path "/nhi:network-hardware-inventory"
+                     + "/nhi:network-elements/nhi:network-element"
+                     + "[nhi:uuid=current()/../ne-ref]/nhi:components"
+                     + "/nhi:component/nhi:uuid";
+                }
+                description
+                  "The reference to the chassis component within
+                   the network element and contained by the rack.";
+              }
+              leaf relative-position {
+                type uint8;
+                description
+                  "The relative position of the chassis within
+                   the rack.";
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  grouping rack-specific-info-grouping {
+    description
+      "Attributes applicable to racks only.";
+    container rack-location {
+      description
+        "The location information of the rack, which comprises the
+         name of the equipment room, row number, and column number.";
+      leaf equipment-room-name {
+        type leafref {
+          path "/nhi:network-hardware-inventory/nhi:equipment-rooms"
+             + "/nhi:equipment-room/nhi:name";
+        }
+        description
+          "Name of the equipment room where this rack is located.";
+      }
+      leaf row-number {
+        type uint32;
+        description
+          "Identifies the row within the equipment room where
+           the rack is located.";
+      }
+      leaf column-number {
+        type uint32;
+        description
+          "Identifies the column within the equipment room where
+           the rack is located.";
+      }
+    }
+    leaf height {
+      type uint16;
+      units millimeter;
+      description
+        "Rack height.";
+    }
+    leaf width {
+      type uint16;
+      units millimeter;
+      description
+        "Rack width.";
+    }
+    leaf depth {
+      type uint16;
+      units millimeter;
+      description
+        "Rack depth.";
+    }
+    leaf max-voltage {
+      type uint16;
+      units volt;
+      description
+        "The maximum voltage that can be supported by the rack.";
+    }
+  }
+
+  grouping components-grouping {
+    description
+      "The attributes of the hardware components.";
+    container components {
+      description
+        "The container for the list of components.";
+      list component {
+        key uuid;
+        description
+          "The list of components within a network element.";
+        uses common-entity-attributes;
+        leaf location {
+          type string;
+          description
+            "Relative location information for this component.
+             In optical transport networks, the location string
+             uses the following pattern:
+             '/ne=<nw-ne-name>[/r=<r_index>][/sh=<sh_index>
+             [/s_sh=<s_sh_index> ...]][[/sl=<sl_index>
+             [/s_sl=<s_sl_index> ...]][/p=<p_index> …]]'
+            ";
+        }
+        leaf class {
+          type identityref {
+            base ianahw:hardware-class;
+          }
+          description
+            "An indication of the general hardware type of the
+             component.";
+          reference
+            "RFC 8348: A YANG Data Model for Hardware Management.";
+        }
+        leaf-list contained-child {
+          type leafref {
+            path "../nhi:uuid";
+          }
+          description
+            "The list of the identifiers of the child components
+             physically contained within this component.";
+        }
+        leaf parent-rel-pos {
+          type int32 {
+            range "0 .. 2147483647";
+          }
+          description
+            "The relative position with respect to the parent
+             component among all the sibling components.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) -
+                       entPhysicalParentRelPos";
+        }
+
+        container parent-component-references {
+          description
+            "The top-level container for the list of the
+             identifiers of the parents of this component in a
+             hierarchy.";
+          list component-reference {
+            key index;
+            description
+              "The list of the identifiers of the parents of this
+               component in a hierarchy.
+
+               The index parameter defines the hierarchy: the topmost
+               parent has an index of 0.";
+            leaf index {
+              type uint8;
+              description
+                "The index of the parent with respect to the
+                 hierarchy.";
+            }
+            leaf class {
+              type leafref {
+                path "../../../nhi:class";
+              }
+              description
+                "Class of the hierarchical parent component.";
+            }
+            leaf uuid {
+              type leafref {
+                path "../../../nhi:uuid";
+              }
+              description
+                "The identifier of the parent component in the
+                 hierarchy.";
+            }
+          }
+        }
+
+        leaf hardware-rev {
+          type string;
+          description
+            "The vendor-specific hardware revision string for the
+             component. The preferred value is the hardware revision
+             identifier actually printed on the component itself (if
+             present).";
+          reference
+            "RFC 6933: Entity MIB (Version 4) -
+                       entPhysicalHardwareRev";
+        }
+        leaf firmware-rev {
+          type string;
+          description
+            "The vendor-specific firmware revision string for the
+             component.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) -
+                       entPhysicalFirmwareRev";
+        }
+        leaf software-rev {
+          type string;
+          description
+            "The vendor-specific software revision string for the
+             component.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) -
+                       entPhysicalSoftwareRev";
+        }
+        leaf serial-num {
+          type string;
+          description
+            "The vendor-specific serial number string for the
+             component. The preferred value is the serial number
+             string actually printed on the component itself (if
+             present).";
+          reference
+            "RFC 6933: Entity MIB (Version 4) -
+                       entPhysicalSerialNum";
+        }
+        leaf mfg-name {
+          type string;
+          description
+            "The name of the manufacturer of this physical component.
+             The preferred value is the manufacturer name string
+             actually printed on the component itself (if present).
+
+             Note that comparisons between instances of the
+             'model-name', 'firmware-rev', 'software-rev', and
+             'serial-num' nodes are only meaningful amongst
+             components with the same value of 'mfg-name'.
+
+             If the manufacturer name string associated with the
+             physical component is unknown to the server, then this
+             node is not instantiated.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgName";
+        }
+        leaf part-number {
+          type string;
+          description
+            "The vendor-specific model name identifier string
+             associated with this physical component. The preferred
+             value is the customer-visible part number, which may be
+             printed on the component itself.
+
+             If the model name string associated with the physical
+             component is unknown to the server, then this node is
+             not instantiated.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) -
+                       entPhysicalModelName";
+        }
+        leaf asset-id {
+          type string;
+          description
+            "This node is a user-assigned asset tracking identifier
+             for the component.
+
+             A server implementation MAY map this leaf to the
+             entPhysicalAssetID MIB object. Such an implementation
+             needs to use some mechanism to handle the differences in
+             size and characters allowed between this leaf and
+             entPhysicalAssetID. The definition of such a mechanism
+             is outside the scope of this document.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) - entPhysicalAssetID";
+        }
+        leaf is-fru {
+          type boolean;
+          description
+            "This node indicates whether or not this component is
+             considered a 'field-replaceable unit' by the vendor. If
+             this node contains the value 'true', then this component
+             identifies a field-replaceable unit. For all components
+             that are permanently contained within a
+             field-replaceable unit, the value 'false' should be
+             returned for this node.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) - entPhysicalIsFRU";
+        }
+        leaf mfg-date {
+          type yang:date-and-time;
+          description
+            "The date of manufacturing of the managed component.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgDate";
+        }
+        leaf-list uri {
+          type inet:uri;
+          description
+            "This node contains identification information about the
+             component.";
+          reference
+            "RFC 6933: Entity MIB (Version 4) - entPhysicalUris";
+        }
+        uses component-specific-info-grouping;
+      }
+    }
+  }
+
+  grouping component-specific-info-grouping {
+    description
+      "Covers attributes of a component that are not defined by
+       RFC 8348; such attributes can be component-specific.
+       Here we provide an extension structure for all the component
+       classes we recognize. These component-specific containers
+       will be enriched in the future.";
+    choice component-class {
+      description
+        "This extension differs between different component
+         classes.";
+      case chassis {
+        when "./class = 'ianahw:chassis'";
+        container chassis-specific-info {
+          description
+            "This container contains some attributes that belong to
+             the chassis only.";
+          uses chassis-specific-info-grouping;
+        }
+      }
+      case container {
+        when "./class = 'ianahw:container'";
+        container slot-specific-info {
+          description
+            "This container contains some attributes that belong to
+             the slot or sub-slot only.";
+          uses slot-specific-info-grouping;
+        }
+      }
+      case module {
+        when "./nhi:class = 'ianahw:module'";
+        container board-specific-info {
+          description
+            "This container contains some attributes that belong to
+             the board only.";
+          uses board-specific-info-grouping;
+        }
+      }
+      case port {
+        when "./nhi:class = 'ianahw:port'";
+        container port-specific-info {
+          description
+            "This container contains some attributes that belong to
+             the port only.";
+          uses port-specific-info-grouping;
+        }
+      }
+      //TO BE ADDED: transceiver
+    }
+  }
+
+  grouping chassis-specific-info-grouping {
+    //To be enriched in the future.
+    description
+      "Specific attributes applicable to chassis only.";
+  }
+
+  grouping slot-specific-info-grouping {
+    //To be enriched in the future.
+    description
+      "Specific attributes applicable to slots only.";
+  }
+
+  grouping board-specific-info-grouping {
+    //To be enriched in the future.
+ description + "Specific attributes applicable to boards only."; + } + + grouping port-specific-info-grouping { + //To be enriched in the future. + description + "Specific attributes applicable to ports only."; + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-yang-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-yang-types@2013-07-15.yang new file mode 100644 index 0000000000000000000000000000000000000000..ee58fa3ab0042120d5607b8713d21fa0ba845895 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-yang-types@2013-07-15.yang @@ -0,0 +1,474 @@ +module ietf-yang-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types"; + prefix "yang"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: <http://tools.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + WG Chair: David Kessens + <mailto:david.kessens@nsn.com> + + WG Chair: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de> + + Editor: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de>"; + + description + "This module contains a collection of generally useful derived + YANG data types. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - yang-identifier + - hex-string + - uuid + - dotted-quad"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of counter and gauge types ***/ + + typedef counter32 { + type uint32; + description + "The counter32 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter32 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter32 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter32. 
+ + In the value set and its semantics, this type is equivalent + to the Counter32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter32 { + type yang:counter32; + default "0"; + description + "The zero-based-counter32 type represents a counter32 + that has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter32 textual convention of the SMIv2."; + reference + "RFC 4502: Remote Network Monitoring Management Information + Base Version 2"; + } + + typedef counter64 { + type uint64; + description + "The counter64 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter64 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter64 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter64. + + In the value set and its semantics, this type is equivalent + to the Counter64 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter64 { + type yang:counter64; + default "0"; + description + "The zero-based-counter64 type represents a counter64 that + has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. 
+ + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter64 textual convention of the SMIv2."; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + typedef gauge32 { + type uint32; + description + "The gauge32 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^32-1 (4294967295 decimal), and + the minimum value cannot be smaller than 0. The value of + a gauge32 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge32 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the Gauge32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef gauge64 { + type uint64; + description + "The gauge64 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^64-1 (18446744073709551615), and + the minimum value cannot be smaller than 0. The value of + a gauge64 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge64 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the CounterBasedGauge64 SMIv2 textual convention defined + in RFC 2856"; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + /*** collection of identifier-related types ***/ + + typedef object-identifier { + type string { + pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))' + + '(\.(0|([1-9]\d*)))*'; + } + description + "The object-identifier type represents administratively + assigned names in a registration-hierarchical-name tree. + + Values of this type are denoted as a sequence of numerical + non-negative sub-identifier values. Each sub-identifier + value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers + are separated by single dots and without any intermediate + whitespace. + + The ASN.1 standard restricts the value space of the first + sub-identifier to 0, 1, or 2. Furthermore, the value space + of the second sub-identifier is restricted to the range + 0 to 39 if the first sub-identifier is 0 or 1. Finally, + the ASN.1 standard requires that an object identifier + has always at least two sub-identifiers. The pattern + captures these restrictions. + + Although the number of sub-identifiers is not limited, + module designers should realize that there may be + implementations that stick with the SMIv2 limit of 128 + sub-identifiers. + + This type is a superset of the SMIv2 OBJECT IDENTIFIER type + since it is not restricted to 128 sub-identifiers. 
Hence, + this type SHOULD NOT be used to represent the SMIv2 OBJECT + IDENTIFIER type; the object-identifier-128 type SHOULD be + used instead."; + reference + "ISO9834-1: Information technology -- Open Systems + Interconnection -- Procedures for the operation of OSI + Registration Authorities: General procedures and top + arcs of the ASN.1 Object Identifier tree"; + } + + typedef object-identifier-128 { + type object-identifier { + pattern '\d*(\.\d*){1,127}'; + } + description + "This type represents object-identifiers restricted to 128 + sub-identifiers. + + In the value set and its semantics, this type is equivalent + to the OBJECT IDENTIFIER type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef yang-identifier { + type string { + length "1..max"; + pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*'; + pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*'; + } + description + "A YANG identifier string as defined by the 'identifier' + rule in Section 12 of RFC 6020. An identifier must + start with an alphabetic character or an underscore + followed by an arbitrary sequence of alphabetic or + numeric characters, underscores, hyphens, or dots. + + A YANG identifier MUST NOT start with any possible + combination of the lowercase or uppercase character + sequence 'xml'."; + reference + "RFC 6020: YANG - A Data Modeling Language for the Network + Configuration Protocol (NETCONF)"; + } + + /*** collection of types related to date and time***/ + + typedef date-and-time { + type string { + pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?' + + '(Z|[\+\-]\d{2}:\d{2})'; + } + description + "The date-and-time type is a profile of the ISO 8601 + standard for representation of dates and times using the + Gregorian calendar. The profile is defined by the + date-time production in Section 5.6 of RFC 3339. + + The date-and-time type is compatible with the dateTime XML + schema type with the following notable exceptions: + + (a) The date-and-time type does not allow negative years. + + (b) The date-and-time time-offset -00:00 indicates an unknown + time zone (see RFC 3339) while -00:00 and +00:00 and Z + all represent the same time zone in dateTime. + + (c) The canonical format (see below) of data-and-time values + differs from the canonical format used by the dateTime XML + schema type, which requires all times to be in UTC using + the time-offset 'Z'. + + This type is not equivalent to the DateAndTime textual + convention of the SMIv2 since RFC 3339 uses a different + separator between full-date and full-time and provides + higher resolution of time-secfrac. + + The canonical format for date-and-time values with a known time + zone uses a numeric time zone offset that is calculated using + the device's configured known offset to UTC time. A change of + the device's offset to UTC time will cause date-and-time values + to change accordingly. Such changes might happen periodically + in case a server follows automatically daylight saving time + (DST) time zone offset changes. 
The canonical format for + date-and-time values with an unknown time zone (usually + referring to the notion of local time) uses the time-offset + -00:00."; + reference + "RFC 3339: Date and Time on the Internet: Timestamps + RFC 2579: Textual Conventions for SMIv2 + XSD-TYPES: XML Schema Part 2: Datatypes Second Edition"; + } + + typedef timeticks { + type uint32; + description + "The timeticks type represents a non-negative integer that + represents the time, modulo 2^32 (4294967296 decimal), in + hundredths of a second between two epochs. When a schema + node is defined that uses this type, the description of + the schema node identifies both of the reference epochs. + + In the value set and its semantics, this type is equivalent + to the TimeTicks type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef timestamp { + type yang:timeticks; + description + "The timestamp type represents the value of an associated + timeticks schema node at which a specific occurrence + happened. The specific occurrence must be defined in the + description of any schema node defined using this type. When + the specific occurrence occurred prior to the last time the + associated timeticks attribute was zero, then the timestamp + value is zero. Note that this requires all timestamp values + to be reset to zero when the value of the associated timeticks + attribute reaches 497+ days and wraps around to zero. + + The associated timeticks schema node must be specified + in the description of any schema node using this type. + + In the value set and its semantics, this type is equivalent + to the TimeStamp textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of generic address types ***/ + + typedef phys-address { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "Represents media- or physical-level addresses represented + as a sequence octets, each octet represented by two hexadecimal + numbers. Octets are separated by colons. The canonical + representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the PhysAddress textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + typedef mac-address { + type string { + pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'; + } + description + "The mac-address type represents an IEEE 802 MAC address. + The canonical representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the MacAddress textual convention of the SMIv2."; + reference + "IEEE 802: IEEE Standard for Local and Metropolitan Area + Networks: Overview and Architecture + RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of XML-specific types ***/ + + typedef xpath1.0 { + type string; + description + "This type represents an XPATH 1.0 expression. + + When a schema node is defined that uses this type, the + description of the schema node MUST specify the XPath + context in which the XPath expression is evaluated."; + reference + "XPATH: XML Path Language (XPath) Version 1.0"; + } + + /*** collection of string types ***/ + + typedef hex-string { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + description + "A hexadecimal string with octets represented as hex digits + separated by colons. 
The canonical representation uses + lowercase characters."; + } + + typedef uuid { + type string { + pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'; + } + description + "A Universally Unique IDentifier in the string representation + defined in RFC 4122. The canonical representation uses + lowercase characters. + + The following is an example of a UUID in string representation: + f81d4fae-7dec-11d0-a765-00a0c91e6bf6 + "; + reference + "RFC 4122: A Universally Unique IDentifier (UUID) URN + Namespace"; + } + + typedef dotted-quad { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'; + } + description + "An unsigned 32-bit number expressed in the dotted-quad + notation, i.e., four octets written as decimal numbers + and separated with the '.' (full stop) character."; + } +} diff --git a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py index ce60bdea3a7ab08b8dc24dd2e7c2efe4ecf81ae0..f360e318127706b4b4c8fdc4130dfdfc0ba711c0 100644 --- a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py +++ b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py @@ -15,33 +15,71 @@ import json from flask.json import jsonify from flask_restful import Resource, request +from werkzeug.exceptions import BadRequest from common.proto.context_pb2 import Empty from common.tools.grpc.Tools import grpc_message_to_json from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient from .Tools import ( - format_grpc_to_json, grpc_connection_id, grpc_context_id, grpc_device_id, grpc_link_id, grpc_policy_rule_id, - grpc_service_id, grpc_service, grpc_slice_id, grpc_topology_id) + format_grpc_to_json, grpc_connection_id, grpc_context, grpc_context_id, grpc_device, + grpc_device_id, grpc_link, grpc_link_id, grpc_policy_rule_id, + grpc_service_id, grpc_service, grpc_slice, grpc_slice_id, grpc_topology, grpc_topology_id +) class _Resource(Resource): def __init__(self) -> None: super().__init__() - self.client = ContextClient() + self.context_client = ContextClient() + self.device_client = DeviceClient() self.service_client = ServiceClient() + self.slice_client = SliceClient() class ContextIds(_Resource): def get(self): - return format_grpc_to_json(self.client.ListContextIds(Empty())) + return format_grpc_to_json(self.context_client.ListContextIds(Empty())) class Contexts(_Resource): def get(self): - return format_grpc_to_json(self.client.ListContexts(Empty())) + return format_grpc_to_json(self.context_client.ListContexts(Empty())) + + def post(self): + json_requests = request.get_json() + if 'contexts' in json_requests: + json_requests = json_requests['contexts'] + return [ + format_grpc_to_json(self.context_client.SetContext(grpc_context(context))) + for context in json_requests + ] + +class Context(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.context_client.GetContext(grpc_context_id(context_uuid))) + + def put(self, context_uuid : str): + context = request.get_json() + if context_uuid != context['context_id']['context_uuid']['uuid']: + raise BadRequest('Mismatching context_uuid') + return format_grpc_to_json(self.context_client.SetContext(grpc_context(context))) + + def delete(self, context_uuid : str): + return 
format_grpc_to_json(self.context_client.RemoveContext(grpc_context_id(context_uuid)))

 class DummyContexts(_Resource):
     def get(self):
-        contexts = grpc_message_to_json(self.client.ListContexts(Empty()), use_integers_for_enums=True)['contexts']
-        devices = grpc_message_to_json(self.client.ListDevices(Empty()), use_integers_for_enums=True)['devices']
-        links = grpc_message_to_json(self.client.ListLinks(Empty()), use_integers_for_enums=True)['links']
+        contexts = grpc_message_to_json(
+            self.context_client.ListContexts(Empty()),
+            use_integers_for_enums=True
+        )['contexts']
+        devices = grpc_message_to_json(
+            self.context_client.ListDevices(Empty()),
+            use_integers_for_enums=True
+        )['devices']
+        links = grpc_message_to_json(
+            self.context_client.ListLinks(Empty()),
+            use_integers_for_enums=True
+        )['links']

         topologies = list()
         slices = list()
@@ -53,17 +91,17 @@ class DummyContexts(_Resource):
             context_id = grpc_context_id(context_uuid)

             topologies.extend(grpc_message_to_json(
-                self.client.ListTopologies(context_id),
+                self.context_client.ListTopologies(context_id),
                 use_integers_for_enums=True
             )['topologies'])

             slices.extend(grpc_message_to_json(
-                self.client.ListSlices(context_id),
+                self.context_client.ListSlices(context_id),
                 use_integers_for_enums=True
             )['slices'])

             context_services = grpc_message_to_json(
-                self.client.ListServices(context_id),
+                self.context_client.ListServices(context_id),
                 use_integers_for_enums=True
             )['services']
             services.extend(context_services)
@@ -72,7 +110,7 @@ class DummyContexts(_Resource):
             service_uuid = service['service_id']['service_uuid']['uuid']
             service_id = grpc_service_id(context_uuid, service_uuid)
             connections.extend(grpc_message_to_json(
-                self.client.ListConnections(service_id),
+                self.context_client.ListConnections(service_id),
                 use_integers_for_enums=True
             )['connections'])

@@ -97,115 +135,191 @@ class DummyContexts(_Resource):
         if len(connections) > 0: dummy_context['connections'] = connections
         return jsonify(dummy_context)

-class Context(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.GetContext(grpc_context_id(context_uuid)))
-
 class TopologyIds(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListTopologyIds(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListTopologyIds(grpc_context_id(context_uuid)))

 class Topologies(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListTopologies(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListTopologies(grpc_context_id(context_uuid)))
+
+    def post(self, context_uuid : str):
+        json_requests = request.get_json()
+        if 'topologies' in json_requests:
+            json_requests = json_requests['topologies']
+        for topology in json_requests:
+            if context_uuid != topology['topology_id']['context_id']['context_uuid']['uuid']:
+                raise BadRequest('Mismatching context_uuid')
+        return [
+            format_grpc_to_json(self.context_client.SetTopology(grpc_topology(topology)))
+            for topology in json_requests
+        ]

 class Topology(_Resource):
     def get(self, context_uuid : str, topology_uuid : str):
-        return format_grpc_to_json(self.client.GetTopology(grpc_topology_id(context_uuid, topology_uuid)))
+        return format_grpc_to_json(self.context_client.GetTopology(grpc_topology_id(context_uuid, topology_uuid)))
+
+    def put(self, context_uuid : str, topology_uuid : str):
+        topology = request.get_json()
+        if context_uuid != topology['topology_id']['context_id']['context_uuid']['uuid']:
+            raise BadRequest('Mismatching context_uuid')
+        if topology_uuid != topology['topology_id']['topology_uuid']['uuid']:
+            raise BadRequest('Mismatching topology_uuid')
+        return format_grpc_to_json(self.context_client.SetTopology(grpc_topology(topology)))
+
+    def delete(self, context_uuid : str, topology_uuid : str):
+        return format_grpc_to_json(self.context_client.RemoveTopology(grpc_topology_id(context_uuid, topology_uuid)))

 class ServiceIds(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListServiceIds(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListServiceIds(grpc_context_id(context_uuid)))

 class Services(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListServices(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListServices(grpc_context_id(context_uuid)))
+
+    def post(self, context_uuid : str):
+        json_requests = request.get_json()
+        if 'services' in json_requests:
+            json_requests = json_requests['services']
+        for service in json_requests:
+            if context_uuid != service['service_id']['context_id']['context_uuid']['uuid']:
+                raise BadRequest('Mismatching context_uuid')
+        return [
+            format_grpc_to_json(self.service_client.CreateService(grpc_service(service)))
+            for service in json_requests
+        ]

 class Service(_Resource):
     def get(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.client.GetService(grpc_service_id(context_uuid, service_uuid)))
-
-    def post(self, context_uuid : str, service_uuid : str):    # pylint: disable=unused-argument
-        service = request.get_json()['services'][0]
-        return format_grpc_to_json(self.service_client.CreateService(grpc_service(
-            service_uuid = service['service_id']['service_uuid']['uuid'],
-            service_type = service['service_type'],
-            context_uuid = service['service_id']['context_id']['context_uuid']['uuid'],
-        )))
-
-    def put(self, context_uuid : str, service_uuid : str):    # pylint: disable=unused-argument
-        service = request.get_json()['services'][0]
-        return format_grpc_to_json(self.service_client.UpdateService(grpc_service(
-            service_uuid = service['service_id']['service_uuid']['uuid'],
-            service_type = service['service_type'],
-            context_uuid = service['service_id']['context_id']['context_uuid']['uuid'],
-            status = service['service_status']['service_status'],
-            endpoint_ids = service['service_endpoint_ids'],
-            constraints = service['service_constraints'],
-            config_rules = service['service_config']['config_rules']
-        )))
+        return format_grpc_to_json(self.context_client.GetService(grpc_service_id(context_uuid, service_uuid)))
+
+    def put(self, context_uuid : str, service_uuid : str):
+        service = request.get_json()
+        if context_uuid != service['service_id']['context_id']['context_uuid']['uuid']:
+            raise BadRequest('Mismatching context_uuid')
+        if service_uuid != service['service_id']['service_uuid']['uuid']:
+            raise BadRequest('Mismatching service_uuid')
+        return format_grpc_to_json(self.service_client.UpdateService(grpc_service(service)))

     def delete(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.service_client.DeleteService(grpc_service_id(
-            context_uuid, service_uuid,
-        )))
+        return format_grpc_to_json(self.service_client.DeleteService(grpc_service_id(context_uuid, service_uuid)))

 class SliceIds(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListSliceIds(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListSliceIds(grpc_context_id(context_uuid)))

 class Slices(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListSlices(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListSlices(grpc_context_id(context_uuid)))
+
+    def post(self, context_uuid : str):
+        json_requests = request.get_json()
+        if 'slices' in json_requests:
+            json_requests = json_requests['slices']
+        for slice_ in json_requests:
+            if context_uuid != slice_['slice_id']['context_id']['context_uuid']['uuid']:
+                raise BadRequest('Mismatching context_uuid')
+        return [
+            format_grpc_to_json(self.slice_client.CreateSlice(grpc_slice(slice_)))
+            for slice_ in json_requests
+        ]

 class Slice(_Resource):
     def get(self, context_uuid : str, slice_uuid : str):
-        return format_grpc_to_json(self.client.GetSlice(grpc_slice_id(context_uuid, slice_uuid)))
+        return format_grpc_to_json(self.context_client.GetSlice(grpc_slice_id(context_uuid, slice_uuid)))
+
+    def put(self, context_uuid : str, slice_uuid : str):
+        slice_ = request.get_json()
+        if context_uuid != slice_['slice_id']['context_id']['context_uuid']['uuid']:
+            raise BadRequest('Mismatching context_uuid')
+        if slice_uuid != slice_['slice_id']['slice_uuid']['uuid']:
+            raise BadRequest('Mismatching slice_uuid')
+        return format_grpc_to_json(self.slice_client.UpdateSlice(grpc_slice(slice_)))
+
+    def delete(self, context_uuid : str, slice_uuid : str):
+        return format_grpc_to_json(self.slice_client.DeleteSlice(grpc_slice_id(context_uuid, slice_uuid)))

 class DeviceIds(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListDeviceIds(Empty()))
+        return format_grpc_to_json(self.context_client.ListDeviceIds(Empty()))

 class Devices(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListDevices(Empty()))
+        return format_grpc_to_json(self.context_client.ListDevices(Empty()))
+
+    def post(self):
+        json_requests = request.get_json()
+        if 'devices' in json_requests:
+            json_requests = json_requests['devices']
+        return [
+            format_grpc_to_json(self.device_client.AddDevice(grpc_device(device)))
+            for device in json_requests
+        ]

 class Device(_Resource):
     def get(self, device_uuid : str):
-        return format_grpc_to_json(self.client.GetDevice(grpc_device_id(device_uuid)))
+        return format_grpc_to_json(self.context_client.GetDevice(grpc_device_id(device_uuid)))
+
+    def put(self, device_uuid : str):
+        device = request.get_json()
+        if device_uuid != device['device_id']['device_uuid']['uuid']:
+            raise BadRequest('Mismatching device_uuid')
+        return format_grpc_to_json(self.device_client.ConfigureDevice(grpc_device(device)))
+
+    def delete(self, device_uuid : str):
+        return format_grpc_to_json(self.device_client.DeleteDevice(grpc_device_id(device_uuid)))

 class LinkIds(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListLinkIds(Empty()))
+        return format_grpc_to_json(self.context_client.ListLinkIds(Empty()))

 class Links(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListLinks(Empty()))
+        return format_grpc_to_json(self.context_client.ListLinks(Empty()))
+
+    def post(self):
+        json_requests = request.get_json()
+        if 'links' in json_requests:
+            json_requests = json_requests['links']
+        return [
+            format_grpc_to_json(self.context_client.SetLink(grpc_link(link)))
+            for link in json_requests
+        ]

 class Link(_Resource):
     def get(self, link_uuid : str):
-        return
format_grpc_to_json(self.client.GetLink(grpc_link_id(link_uuid))) + return format_grpc_to_json(self.context_client.GetLink(grpc_link_id(link_uuid))) + + def put(self, link_uuid : str): + link = request.get_json() + if link_uuid != link['link_id']['link_uuid']['uuid']: + raise BadRequest('Mismatching link_uuid') + return format_grpc_to_json(self.context_client.SetLink(grpc_link(link))) + + def delete(self, link_uuid : str): + return format_grpc_to_json(self.context_client.RemoveLink(grpc_link_id(link_uuid))) class ConnectionIds(_Resource): def get(self, context_uuid : str, service_uuid : str): - return format_grpc_to_json(self.client.ListConnectionIds(grpc_service_id(context_uuid, service_uuid))) + return format_grpc_to_json(self.context_client.ListConnectionIds(grpc_service_id(context_uuid, service_uuid))) class Connections(_Resource): def get(self, context_uuid : str, service_uuid : str): - return format_grpc_to_json(self.client.ListConnections(grpc_service_id(context_uuid, service_uuid))) + return format_grpc_to_json(self.context_client.ListConnections(grpc_service_id(context_uuid, service_uuid))) class Connection(_Resource): def get(self, connection_uuid : str): - return format_grpc_to_json(self.client.GetConnection(grpc_connection_id(connection_uuid))) + return format_grpc_to_json(self.context_client.GetConnection(grpc_connection_id(connection_uuid))) class PolicyRuleIds(_Resource): def get(self): - return format_grpc_to_json(self.client.ListPolicyRuleIds(Empty())) + return format_grpc_to_json(self.context_client.ListPolicyRuleIds(Empty())) class PolicyRules(_Resource): def get(self): - return format_grpc_to_json(self.client.ListPolicyRules(Empty())) + return format_grpc_to_json(self.context_client.ListPolicyRules(Empty())) class PolicyRule(_Resource): def get(self, policy_rule_uuid : str): - return format_grpc_to_json(self.client.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid))) + return format_grpc_to_json(self.context_client.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid))) diff --git a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py index 1f69ffffb8c97a83591ec626920b57f40d032783..bb10ee375f0ecdf7b63459b300dd0ff0fed40615 100644 --- a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py +++ b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py @@ -12,21 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
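With the refactor above, every collection resource gains a POST handler and every item resource gains PUT/DELETE handlers, each validating that the identifiers in the URL match the payload before delegating to the Context/Device/Service/Slice gRPC clients. A minimal sketch of driving the new context CRUD cycle over HTTP; the mount point, exact routes, host, port, and credentials are assumptions to adjust to your deployment, since the route registration is not part of this hunk:

```python
# Illustrative only; assumes the tfs_api plugin is mounted at /tfs-api on
# localhost:80 with admin/admin credentials.
import requests

BASE = 'http://localhost:80/tfs-api'
AUTH = ('admin', 'admin')

context = {'context_id': {'context_uuid': {'uuid': 'admin'}}, 'name': 'admin'}

# POST accepts either a bare list or a {'contexts': [...]} wrapper (see Contexts.post)
requests.post(f'{BASE}/contexts', json={'contexts': [context]}, auth=AUTH).raise_for_status()

# PUT rejects payloads whose uuid does not match the URL (BadRequest)
requests.put(f'{BASE}/context/admin', json=context, auth=AUTH).raise_for_status()

# DELETE maps to ContextClient.RemoveContext
requests.delete(f'{BASE}/context/admin', auth=AUTH).raise_for_status()
```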
+from typing import Dict from flask.json import jsonify from common.proto.context_pb2 import ( - ConnectionId, ContextId, DeviceId, LinkId, ServiceId, SliceId, TopologyId, Service, ServiceStatusEnum + ConnectionId, Context, ContextId, Device, DeviceId, Link, LinkId, + ServiceId, Slice, SliceId, Topology, TopologyId, Service ) -from common.proto.policy_pb2 import PolicyRuleId +from common.proto.policy_pb2 import PolicyRule, PolicyRuleId from common.tools.grpc.Tools import grpc_message_to_json from common.tools.object_factory.Connection import json_connection_id from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.ConfigRule import json_config_rule -from common.tools.object_factory.Constraint import json_constraint_custom -from common.tools.object_factory.EndPoint import json_endpoint_id from common.tools.object_factory.Device import json_device_id from common.tools.object_factory.Link import json_link_id from common.tools.object_factory.PolicyRule import json_policyrule_id -from common.tools.object_factory.Service import json_service_id, json_service +from common.tools.object_factory.Service import json_service_id from common.tools.object_factory.Slice import json_slice_id from common.tools.object_factory.Topology import json_topology_id @@ -40,51 +39,41 @@ def grpc_connection_id(connection_uuid): def grpc_context_id(context_uuid): return ContextId(**json_context_id(context_uuid)) +def grpc_context(json_context : Dict): + return Context(**json_context) + def grpc_device_id(device_uuid): return DeviceId(**json_device_id(device_uuid)) +def grpc_device(json_device : Dict): + return Device(**json_device) + def grpc_link_id(link_uuid): return LinkId(**json_link_id(link_uuid)) +def grpc_link(json_link : Dict): + return Link(**json_link) + def grpc_service_id(context_uuid, service_uuid): return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid))) -def grpc_service( - service_uuid, service_type, context_uuid, status=None, endpoint_ids=None, constraints=None, config_rules=None -): - json_context = json_context_id(context_uuid) - json_status = status if status else ServiceStatusEnum.SERVICESTATUS_PLANNED - json_endpoints_ids = [ - json_endpoint_id( - json_device_id(endpoint_id['device_id']['device_uuid']['uuid']), - endpoint_id['endpoint_uuid']['uuid'] - ) - for endpoint_id in endpoint_ids - ] if endpoint_ids else [] - json_constraints = [ - json_constraint_custom( - constraint['custom']['constraint_type'], - constraint['custom']['constraint_value'] - ) - for constraint in constraints - ] if constraints else [] - json_config_rules = [ - json_config_rule( - config_rule['action'], - config_rule['custom']['resource_key'], - config_rule['custom']['resource_value'] - ) - for config_rule in config_rules - ] if config_rules else [] - return Service(**json_service( - service_uuid, service_type, json_context, json_status, - json_endpoints_ids, json_constraints, json_config_rules)) +def grpc_service(json_service : Dict): + return Service(**json_service) def grpc_slice_id(context_uuid, slice_uuid): return SliceId(**json_slice_id(slice_uuid, context_id=json_context_id(context_uuid))) - + +def grpc_slice(json_slice : Dict): + return Slice(**json_slice) + def grpc_topology_id(context_uuid, topology_uuid): return TopologyId(**json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))) +def grpc_topology(json_topology : Dict): + return Topology(**json_topology) + def grpc_policy_rule_id(policy_rule_uuid): return 
PolicyRuleId(**json_policyrule_id(policy_rule_uuid)) + +def grpc_policy_rule(json_policy_rule : Dict): + return PolicyRule(**json_policy_rule) diff --git a/src/nbi/tests/data/ietf_acl.json b/src/nbi/tests/data/ietf_acl.json new file mode 100644 index 0000000000000000000000000000000000000000..072df6d01513db8e47e50ffd42fc6719a6715f77 --- /dev/null +++ b/src/nbi/tests/data/ietf_acl.json @@ -0,0 +1,56 @@ +{ + "ietf-access-control-list": { + "acls": { + "acl": [ + { + "name": "sample-ipv4-acl", + "type": "ipv4-acl-type", + "aces": { + "ace": [ + { + "name": "rule1", + "matches": { + "ipv4": { + "dscp": 18, + "source-ipv4-network": "128.32.10.6/24", + "destination-ipv4-network": "172.10.33.0/24" + }, + "tcp": { + "flags": "syn", + "source-port": { + "port": 1444, + "operator": "eq" + }, + "destination-port": { + "port": 1333, + "operator": "eq" + } + } + }, + "actions": { + "forwarding": "drop" + } + } + ] + } + } + ], + "attachment-points": { + "interface": [ + { + "interface-id": "200", + "ingress": { + "acl-sets": { + "acl-set": [ + { + "name": "sample-ipv4-acl" + } + ] + } + } + } + ] + } + } + } +} diff --git a/src/nbi/tests/ietf_acl_client.py b/src/nbi/tests/ietf_acl_client.py new file mode 100644 index 0000000000000000000000000000000000000000..155244a9261ec2a915512cd6e8f9f2df703b7868 --- /dev/null +++ b/src/nbi/tests/ietf_acl_client.py @@ -0,0 +1,89 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
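The test client defined below wraps three RESTCONF calls against the NBI. For orientation, a typical session looks like the following sketch; the import arrangement is an assumption (it presumes running from this directory), and the constants come from the module itself:

```python
# Hypothetical usage of the client defined in this file.
from ietf_acl_client import TfsIetfAclClient, ACL_RULE, ACL_NAME, CSG1_DEVICE_UUID

client = TfsIetfAclClient()  # defaults: http://localhost:80/restconf/data, admin/admin

print(client.post(CSG1_DEVICE_UUID, ACL_RULE))    # POST   {base}/device=<uuid>/ietf-access-control-list:acls
print(client.get(CSG1_DEVICE_UUID, ACL_NAME))     # GET    {base}/device=<uuid>/ietf-access-control-list:acl=<name>
print(client.delete(CSG1_DEVICE_UUID, ACL_NAME))  # DELETE {base}/device=<uuid>/ietf-access-control-list:acl=<name>
```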
+ +import requests, time +from typing import Optional +from requests.auth import HTTPBasicAuth + +BASE_URL = '{:s}://{:s}:{:d}/restconf/data' +ACLS_URL = '{:s}/device={:s}/ietf-access-control-list:acls' +ACL_URL = '{:s}/device={:s}/ietf-access-control-list:acl={:s}' + +CSG1_DEVICE_UUID = '118295c8-318a-52ec-a394-529fc4b70f2f' # router: 128.32.10.1 +ACL_NAME = 'sample-ipv4-acl' +ACL_RULE = {"ietf-access-control-list:acls": { + "acl": [{ + "name": "sample-ipv4-acl", "type": "ipv4-acl-type", + "aces": {"ace": [{ + "name": "rule1", + "matches": { + "ipv4": { + "source-ipv4-network": "128.32.10.6/24", + "destination-ipv4-network": "172.10.33.0/24", + "dscp": 18 + }, + "tcp": { + "source-port": {"operator": "eq", "port": 1444}, + "destination-port": {"operator": "eq", "port": 1333}, + "flags": "syn" + } + }, + "actions": {"forwarding": "drop"} + }]} + }], + "attachment-points": {"interface": [{ + "interface-id": "200", + "ingress": {"acl-sets": {"acl-set": [{"name": "sample-ipv4-acl"}]}} + }] +}}} + +class TfsIetfAclClient: + def __init__( + self, host : str = 'localhost', port : int = 80, schema : str = 'http', + username : Optional[str] = 'admin', password : Optional[str] = 'admin', + timeout : int = 10, allow_redirects : bool = True, verify : bool = False + ) -> None: + self._base_url = BASE_URL.format(schema, host, port) + auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None + self._settings = dict(auth=auth, timeout=timeout, allow_redirects=allow_redirects, verify=verify) + + def post(self, device_uuid : str, ietf_acl_data : dict) -> str: + request_url = ACLS_URL.format(self._base_url, device_uuid) + reply = requests.post(request_url, json=ietf_acl_data, **(self._settings)) + return reply.text + + def get(self, device_uuid : str, acl_name : str) -> str: + request_url = ACL_URL.format(self._base_url, device_uuid, acl_name) + reply = requests.get(request_url, **(self._settings)) + return reply.text + + def delete(self, device_uuid : str, acl_name : str) -> str: + request_url = ACL_URL.format(self._base_url, device_uuid, acl_name) + reply = requests.delete(request_url, **(self._settings)) + return reply.text + +def main(): + client = TfsIetfAclClient() + print(f'ACL rule: {ACL_RULE}') + post_response = client.post(CSG1_DEVICE_UUID, ACL_RULE) + print(f'post response: {post_response}') + time.sleep(.5) + get_response = client.get(CSG1_DEVICE_UUID, ACL_NAME) + print(f'get response: {get_response}') + time.sleep(.5) + delete_response = client.delete(CSG1_DEVICE_UUID, ACL_NAME) + print(f'delete response: {delete_response}') + +if __name__ == '__main__': + main() diff --git a/src/nbi/tests/test_yang_acl.py b/src/nbi/tests/test_yang_acl.py new file mode 100644 index 0000000000000000000000000000000000000000..607001870fa69e79bd7ef53fa92d88bbf353e45e --- /dev/null +++ b/src/nbi/tests/test_yang_acl.py @@ -0,0 +1,104 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
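The validator defined below synthesizes an ietf-interfaces subtree before validating the ACL payload. This matters because, in the IETF ACL model, each attachment-point interface-id is an if:interface-ref (a leafref into /interfaces/interface/name), so a strict parse fails unless the referenced interfaces exist in the data tree. A sketch of that failure mode, reusing the same libyang-python calls the test makes (the './yang' path and the exact error text are assumptions):

```python
import json, libyang

ctx = libyang.Context('./yang')  # directory holding the modules listed below
for name in ('ietf-yang-types', 'ietf-interfaces', 'iana-if-type', 'ietf-access-control-list'):
    ctx.load_module(name).feature_enable_all()

acl_msg = {'ietf-access-control-list:acls': {
    'acl': [{'name': 'sample-ipv4-acl', 'type': 'ipv4-acl-type'}],
    'attachment-points': {'interface': [{
        'interface-id': '200',
        'ingress': {'acl-sets': {'acl-set': [{'name': 'sample-ipv4-acl'}]}},
    }]},
}}

try:
    # No /ietf-interfaces:interfaces/interface[name="200"] entry is present,
    # so the leafref cannot be resolved and strict validation should fail.
    ctx.parse_data_mem(json.dumps(acl_msg), 'json', validate_present=True, strict=True)
except libyang.LibyangError as exc:
    print('rejected as expected:', exc)
```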
+ +import copy, json, libyang, logging, os +from typing import Dict, List, Optional + +LOGGER = logging.getLogger(__name__) + +YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang') +YANG_MODULES = [ + 'ietf-yang-types', + 'ietf-interfaces', + 'iana-if-type', + 'ietf-access-control-list', +] + +class YangValidator: + def __init__(self) -> None: + self._yang_context = libyang.Context(YANG_DIR) + for module_name in YANG_MODULES: + LOGGER.info('Loading module: {:s}'.format(str(module_name))) + yang_module = self._yang_context.load_module(module_name) + yang_module.feature_enable_all() + yang_module_prefix = yang_module.prefix() + LOGGER.info(' Prefix: {:s}'.format(str(yang_module_prefix))) + + def parse_to_dict(self, message : Dict, interface_names : List[str]) -> Dict: + interfaces = self._yang_context.create_data_path('/ietf-interfaces:interfaces') + for if_index,interface_name in enumerate(interface_names): + if_path = 'interface[name="{:s}"]'.format(str(interface_name)) + interface = interfaces.create_path(if_path) + interface.create_path('if-index', if_index + 1) + interface.create_path('type', 'iana-if-type:ethernetCsmacd') + interface.create_path('admin-status', 'up') + interface.create_path('oper-status', 'up') + statistics = interface.create_path('statistics') + statistics.create_path('discontinuity-time', '2024-07-11T10:00:00.000000Z') + + message = copy.deepcopy(message) + message['ietf-interfaces:interfaces'] = interfaces.print_dict()['interfaces'] + + dnode : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(message), 'json', validate_present=True, strict=True + ) + if dnode is None: raise Exception('Unable to parse Message({:s})'.format(str(message))) + message = dnode.print_dict() + dnode.free() + interfaces.free() + return message + + def destroy(self) -> None: + self._yang_context.destroy() + self._yang_context = None + +def main() -> None: + import uuid # pylint: disable=import-outside-toplevel + logging.basicConfig(level=logging.DEBUG) + + interface_names = {'200', '500', str(uuid.uuid4()), str(uuid.uuid4())} + ACL_RULE = {"ietf-access-control-list:acls": { + "acl": [{ + "name": "sample-ipv4-acl", "type": "ipv4-acl-type", + "aces": {"ace": [{ + "name": "rule1", + "matches": { + "ipv4": { + "source-ipv4-network": "128.32.10.6/24", + "destination-ipv4-network": "172.10.33.0/24", + "dscp": 18 + }, + "tcp": { + "source-port": {"operator": "eq", "port": 1444}, + "destination-port": {"operator": "eq", "port": 1333}, + "flags": "syn" + } + }, + "actions": {"forwarding": "drop"} + }]} + }], + "attachment-points": {"interface": [{ + "interface-id": "200", + "ingress": {"acl-sets": {"acl-set": [{"name": "sample-ipv4-acl"}]}} + }] + }}} + + yang_validator = YangValidator() + request_data = yang_validator.parse_to_dict(ACL_RULE, list(interface_names)) + yang_validator.destroy() + + LOGGER.info('request_data = {:s}'.format(str(request_data))) + +if __name__ == '__main__': + main() diff --git a/src/opticalattackdetector/requirements.in b/src/opticalattackdetector/requirements.in index 39982773b7bbd14a680aa3b26173e8fbcecd88fd..e8476e9faebacd73ff570de43f6417f4f32e23a0 100644 --- a/src/opticalattackdetector/requirements.in +++ b/src/opticalattackdetector/requirements.in @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
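The pin below (mirrored in opticalcontroller and pathcomp) keeps the component on the NumPy 1.x line: NumPy 2.0 changed the C ABI and removed long-deprecated Python aliases, so code and wheels built against 1.x can break at import time. One illustrative incompatibility, not taken from this repo:

```python
import numpy as np

# Prints the float64 alias on numpy<2.0; raises AttributeError on numpy>=2.0,
# where the alias was removed. This is the class of breakage the pin avoids.
print(np.float_)
```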
-numpy +numpy<2.0.0 redis diff --git a/src/opticalcontroller/requirements.in b/src/opticalcontroller/requirements.in index 0b1947bee2c7f1e89491dff4f7589d3465d28c38..4732ee635a60b8320e25cd2c26388d1cfdfd25cc 100644 --- a/src/opticalcontroller/requirements.in +++ b/src/opticalcontroller/requirements.in @@ -17,5 +17,5 @@ flask-restplus==0.13.0 itsdangerous==1.1.0 Jinja2==2.11.3 MarkupSafe==1.1.1 -numpy==1.23.0 +numpy<2.0.0 Werkzeug==0.16.1 diff --git a/src/pathcomp/frontend/requirements.in b/src/pathcomp/frontend/requirements.in index 0466b25dc1e326d72735c02aa9b581264dd02620..602ecff548366217e24331721bd0bec7afff8e04 100644 --- a/src/pathcomp/frontend/requirements.in +++ b/src/pathcomp/frontend/requirements.in @@ -13,6 +13,7 @@ # limitations under the License. +numpy<2.0.0 pandas==1.5.* requests==2.27.1 scikit-learn==1.1.* diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py index 0c98254729afd0b2089c84499a9c739e985b27f5..f92f9b2fff11ab585813ab59e07c463f361413d2 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py @@ -184,6 +184,10 @@ def compose_device_config_rules( device_endpoint_keys = set(itertools.product(device_keys, endpoint_keys)) if len(device_endpoint_keys.intersection(endpoints_traversed)) == 0: continue + + # TODO: check if vlan needs to be removed from config_rule + #config_rule.custom.resource_key = re.sub('\/vlan\[[^\]]+\]', '', config_rule.custom.resource_key) + subservice_config_rules.append(config_rule) else: continue diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py index 4e9ceabc3add1bae949f751f36e2a6f8cb237fa6..7527932877f47a092a35f286defb3744e05db109 100644 --- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py @@ -12,24 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. 
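The hunk below threads a third, device-level settings container through setup/teardown and adds a get_value helper that returns the field from the first container defining it. Precedence therefore follows argument order: with settings = (json_settings, json_endpoint_settings, json_device_settings), service-level values win over endpoint-level ones, which win over device-level ones. A self-contained sketch (the container contents are illustrative):

```python
from typing import Any, Optional

def get_value(field_name : str, *containers, default=None) -> Optional[Any]:
    # Same helper as in the hunk below: the first container defining the field wins.
    if len(containers) == 0: raise Exception('No containers specified')
    for container in containers:
        if field_name not in container: continue
        return container[field_name]
    return default

service  = {'mtu': 1500}
endpoint = {'vlan_id': 400}
device   = {'mtu': 9000, 'bgp_as': 65001}
settings = (service, endpoint, device)

print(get_value('mtu', *settings))                           # 1500: service overrides device
print(get_value('bgp_as', *settings))                        # 65001: falls through to device
print(get_value('router_id', *settings, default='0.0.0.0'))  # default when absent everywhere
```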
-from typing import Dict, List, Tuple +from typing import Any, Dict, List, Optional, Tuple from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from service.service.service_handler_api.AnyTreeTools import TreeNode +def get_value(field_name : str, *containers, default=None) -> Optional[Any]: + if len(containers) == 0: raise Exception('No containers specified') + for container in containers: + if field_name not in container: continue + return container[field_name] + return default + def setup_config_rules( service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str, - service_settings : TreeNode, endpoint_settings : TreeNode, endpoint_acls : List [Tuple] + service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode, endpoint_acls : List [Tuple] ) -> List[Dict]: if service_settings is None: return [] + if device_settings is None: return [] if endpoint_settings is None: return [] json_settings : Dict = service_settings.value + json_device_settings : Dict = device_settings.value json_endpoint_settings : Dict = endpoint_settings.value - mtu = json_settings.get('mtu', 1450 ) # 1512 + settings = (json_settings, json_endpoint_settings, json_device_settings) + + mtu = get_value('mtu', *settings, default=1450) # 1512 #address_families = json_settings.get('address_families', [] ) # ['IPV4'] - bgp_as = json_settings.get('bgp_as', 65000 ) # 65000 + bgp_as = get_value('bgp_as', *settings, default=65000) # 65000 router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' route_distinguisher = json_settings.get('route_distinguisher', '65000:101' ) # '60001:801' @@ -76,6 +87,7 @@ def setup_config_rules( 'name': network_instance_name, 'protocol_name': 'BGP', 'identifier': 'BGP', + 'type': 'L3VRF', 'as': bgp_as, 'router_id': router_id, }), @@ -88,7 +100,6 @@ def setup_config_rules( 'protocol_name': 'DIRECTLY_CONNECTED', }), - #Add STATIC protocol to network instance json_config_rule_set( '/network_instance[{:s}]/protocols[STATIC]'.format(network_instance_name), { @@ -114,6 +125,7 @@ def setup_config_rules( json_config_rule_set( '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { 'name' : network_instance_name, + 'type' : 'L3VRF', 'id' : if_subif_name, 'interface' : if_subif_name, 'subinterface': sub_interface_index, @@ -183,6 +195,7 @@ def setup_config_rules( }), ] + for res_key, res_value in endpoint_acls: json_config_rules.append( {'action': 1, 'acl': res_value} @@ -191,23 +204,27 @@ def setup_config_rules( def teardown_config_rules( service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str, - service_settings : TreeNode, endpoint_settings : TreeNode + service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode ) -> List[Dict]: if service_settings is None: return [] + if device_settings is None: return [] if endpoint_settings is None: return [] json_settings : Dict = service_settings.value + json_device_settings : Dict = device_settings.value json_endpoint_settings : Dict = endpoint_settings.value + settings = (json_settings, json_endpoint_settings, json_device_settings) + service_short_uuid = service_uuid.split('-')[-1] network_instance_name = '{:s}-NetInst'.format(service_short_uuid) #network_interface_desc = '{:s}-NetIf'.format(service_uuid) #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) - #mtu = json_settings.get('mtu', 1450 ) # 1512 + #mtu = 
get_value('mtu', *settings, default=1450) # 1512 #address_families = json_settings.get('address_families', [] ) # ['IPV4'] - #bgp_as = json_settings.get('bgp_as', 65000 ) # 65000 + #bgp_as = get_value('bgp_as', *settings, default=65000) # 65000 route_distinguisher = json_settings.get('route_distinguisher', '0:0' ) # '60001:801' #sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules_test_ocnos.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules_test_ocnos.py new file mode 100644 index 0000000000000000000000000000000000000000..5fa1d0b5b6931902b4ac50847c90bf67738032ba --- /dev/null +++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules_test_ocnos.py @@ -0,0 +1,337 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Tuple +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from service.service.service_handler_api.AnyTreeTools import TreeNode + +def setup_config_rules( + service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str, + service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode, endpoint_acls : List [Tuple] +) -> List[Dict]: + + if service_settings is None: return [] + if device_settings is None: return [] + if endpoint_settings is None: return [] + + json_settings : Dict = service_settings.value + json_device_settings : Dict = device_settings.value + json_endpoint_settings : Dict = endpoint_settings.value + + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + bgp_as = json_device_settings.get('bgp_as', 65000 ) # 65000 + + router_id = json_device_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + route_distinguisher = json_device_settings.get('route_distinguisher', '65000:101' ) # '60001:801' + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' + address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + + policy_import = json_device_settings.get('policy_AZ', '2' ) # 2 + policy_export = json_device_settings.get('policy_ZA', '7' ) # 30 + #network_interface_desc = '{:s}-NetIf'.format(service_uuid) + network_interface_desc = json_endpoint_settings.get('ni_description','') + #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) + network_subinterface_desc = json_endpoint_settings.get('subif_description','') + #service_short_uuid = service_uuid.split('-')[-1] + #network_instance_name = '{:s}-NetInst'.format(service_short_uuid) + network_instance_name = json_endpoint_settings.get('ni_name', 
service_uuid.split('-')[-1]) #ELAN-AC:1 + + self_bgp_if_name = json_device_settings.get('self_bgp_interface_name', '') + self_bgp_address_ip = json_device_settings.get('self_bgp_interface_address_ip', '') + bgp_address_prefix = json_device_settings.get('bgp_interface_address_prefix', '') + bgp_sub_interface_index = json_device_settings.get('self_bgp_sub_interface_index', 0) + neighbor_bgp_if_address_ip= json_device_settings.get('neighbor_bgp_interface_address_ip', '0.0.0.0') # '2.2.2.1' + + # if_subif_name = '{:s}.{:d}'.format(endpoint_name, 0) + if_subif_name = '{:s}'.format(endpoint_name[5:]) + + json_config_rules = [ + # Configure Interface (not used) + #json_config_rule_set( + # '/interface[{:s}]'.format(endpoint_name), { + # 'name': endpoint_name, + # 'description': network_interface_desc, + # 'mtu': mtu, + #}), + + #Create network instance + json_config_rule_set( + '/network_instance[{:s}]'.format(network_instance_name), { + 'name': network_instance_name, + 'description': network_interface_desc, + 'type': 'L3VRF', + 'route_distinguisher': route_distinguisher, + 'router_id': router_id, + #'address_families': address_families, + }), + + #Add BGP protocol to network instance + json_config_rule_set( + '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { + 'name': network_instance_name, + 'protocol_name': bgp_as, + 'identifier': 'BGP', + 'type': 'L3VRF', + 'as': bgp_as, + 'router_id': router_id, + 'neighbors': [{'ip_address': neighbor_bgp_if_address_ip, 'remote_as': bgp_as}] + }), + + #Add DIRECTLY CONNECTED protocol to network instance + json_config_rule_set( + '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(network_instance_name), { + 'name': network_instance_name, + 'identifier': 'DIRECTLY_CONNECTED', + 'protocol_name': 'DIRECTLY_CONNECTED', + }), + + #Add STATIC protocol to network instance + json_config_rule_set( + '/network_instance[{:s}]/protocols[STATIC]'.format(network_instance_name), { + 'name': network_instance_name, + 'identifier': 'STATIC', + 'protocol_name': 'STATIC', + }), + + #Create interface with subinterface (without IP address) + json_config_rule_set( + '/interface[{:s}]/subinterface[{:d}]'.format(if_subif_name, sub_interface_index), { + 'name' : if_subif_name, + 'type' :'ethernetCsmacd', + 'mtu' : mtu, + 'index' : sub_interface_index, + 'description': network_subinterface_desc, + 'vlan_id' : vlan_id, + }), + + #Associate interface to network instance + json_config_rule_set( + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { + 'name' : network_instance_name, + 'type' : 'L3VRF', + 'id' : if_subif_name, + 'interface' : if_subif_name, + 'subinterface' : sub_interface_index, + 'address_ip' : address_ip, + 'address_prefix': address_prefix, + }), + + #Create interface with subinterface (with IP address) + json_config_rule_set( + '/interface[{:s}]/subinterface[{:d}]'.format(if_subif_name, sub_interface_index), { + 'name' : if_subif_name, + 'type' :'ethernetCsmacd', + 'mtu' : mtu, + 'index' : sub_interface_index, + 'description' : network_subinterface_desc, + 'vlan_id' : vlan_id, + 'address_ip' : address_ip, + 'address_prefix': address_prefix, + }), + + json_config_rule_set( + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, self_bgp_if_name), { + 'name' : network_instance_name, + 'type' : 'L3VRF', + 'id' : self_bgp_if_name, + 'interface' : self_bgp_if_name, + 'subinterface': bgp_sub_interface_index, + 'address_ip' : self_bgp_address_ip, + 'address_prefix': bgp_address_prefix, + 
}), + + #Create routing policy + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import,route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_import), + 'ext_community_member' : route_distinguisher, + }), + json_config_rule_set( + # pylint: disable=duplicate-string-formatting-argument + '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(policy_import, policy_import), { + 'policy_name' : policy_import, + 'statement_name' : 'stm_{:s}'.format(policy_import), # OCNOS: '10', + 'ext_community_set_name': 'set_{:s}'.format(policy_import), + 'policy_result' : 'ACCEPT_ROUTE', + }), + + #Associate routing policy to network instance + json_config_rule_set( + '/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_import), { + 'name' : network_instance_name, + 'import_policy': policy_import, + }), + + #Create routing policy + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_export), + 'ext_community_member' : route_distinguisher, + }), + json_config_rule_set( + # pylint: disable=duplicate-string-formatting-argument + '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(policy_export, policy_export), { + 'policy_name' : policy_export, + 'statement_name' : 'stm_{:s}'.format(policy_export), # OCNOS: '10', + 'ext_community_set_name': 'set_{:s}'.format(policy_export), + 'policy_result' : 'ACCEPT_ROUTE', + }), + + #Associate routing policy to network instance + json_config_rule_set( + '/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_export),{ + 'name' : network_instance_name, + 'export_policy': policy_export, + }), + + #Create table connections + json_config_rule_set( + '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(network_instance_name), { + 'name' : network_instance_name, + 'src_protocol' : 'DIRECTLY_CONNECTED', + 'dst_protocol' : 'BGP', + 'address_family' : 'IPV4', + 'default_import_policy': 'ACCEPT_ROUTE', + 'as' : bgp_as, + }), + + json_config_rule_set( + '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { + 'name' : network_instance_name, + 'src_protocol' : 'STATIC', + 'dst_protocol' : 'BGP', + 'address_family' : 'IPV4', + 'default_import_policy': 'ACCEPT_ROUTE', + 'as' : bgp_as, + }), + + ] + + for res_key, res_value in endpoint_acls: + json_config_rules.append( + {'action': 1, 'acl': res_value} + ) + return json_config_rules + +def teardown_config_rules( + service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str, + service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode +) -> List[Dict]: + + if service_settings is None: return [] + if device_settings is None: return [] + if endpoint_settings is None: return [] + + json_settings : Dict = service_settings.value + json_device_settings : Dict = device_settings.value + json_endpoint_settings : Dict = endpoint_settings.value + + service_short_uuid = service_uuid.split('-')[-1] + # network_instance_name = '{:s}-NetInst'.format(service_short_uuid) + network_instance_name = json_endpoint_settings.get('ni_name', service_short_uuid) #ELAN-AC:1 + #network_interface_desc = '{:s}-NetIf'.format(service_uuid) + # network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) + 
network_subinterface_desc = '' + + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + #bgp_as = json_device_settings.get('bgp_as', 65000 ) # 65000 + route_distinguisher = json_device_settings.get('route_distinguisher', '0:0' ) # '60001:801' + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + #router_id = json_device_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' + address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + policy_import = json_device_settings.get('policy_AZ', '2' ) # 2 + policy_export = json_device_settings.get('policy_ZA', '7' ) # 30 + + self_bgp_if_name = json_device_settings.get('self_bgp_interface_name', '') + self_bgp_address_ip = json_device_settings.get('self_bgp_interface_address_ip', '') + bgp_address_prefix = json_device_settings.get('bgp_interface_address_prefix', '') + bgp_sub_interface_index = json_device_settings.get('self_bgp_sub_interface_index', 0) + + # if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id) + if_subif_name = '{:s}'.format(endpoint_name[5:]) + + json_config_rules = [ + #Delete export routing policy + json_config_rule_delete( + # pylint: disable=duplicate-string-formatting-argument + '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(policy_export, policy_export), { + 'policy_name' : policy_export, + 'statement_name' : 'stm_{:s}'.format(policy_export), # OCNOS: '10', + 'ext_community_set_name': 'set_{:s}'.format(policy_export), + 'policy_result' : 'ACCEPT_ROUTE', + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_export), + 'ext_community_member' : route_distinguisher, + }), + + #Delete import routing policy + json_config_rule_delete( + # pylint: disable=duplicate-string-formatting-argument + '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(policy_import, policy_import), { + 'policy_name' : policy_import, + 'statement_name' : 'stm_{:s}'.format(policy_import), # OCNOS: '10', + 'ext_community_set_name': 'set_{:s}'.format(policy_import), + 'policy_result' : 'ACCEPT_ROUTE', + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import, route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_import), + 'ext_community_member' : route_distinguisher, + }), + + #Delete interface; automatically deletes: + # - /interface[]/subinterface[] + # json_config_rule_delete('/interface[{:s}]/subinterface[0]'.format(if_subif_name), + # { + # 'name': if_subif_name, + # }), + + #Delete network instance; automatically deletes: + # - /network_instance[]/interface[] + # - /network_instance[]/protocols[] + # - /network_instance[]/inter_instance_policies[] + + #Associate interface to network instance + json_config_rule_set( + '/network_instance[{:s}]/interface[{:s}]'.format('default', if_subif_name), { + 'name' : 'default', + 'id' : if_subif_name, + 'interface' : if_subif_name, + 'subinterface': sub_interface_index, + 'address_ip' : address_ip, + 'address_prefix': address_prefix, + }), + json_config_rule_set( + '/network_instance[{:s}]/interface[{:s}]'.format('default', self_bgp_if_name), { + 'name' : 'default', + 'id' : self_bgp_if_name, + 'interface' : 
self_bgp_if_name, + 'subinterface': bgp_sub_interface_index, + 'address_ip' : self_bgp_address_ip, + 'address_prefix': bgp_address_prefix, + }), + json_config_rule_delete('/network_instance[{:s}]'.format(network_instance_name), + { + 'name': network_instance_name + }), + ] + return json_config_rules diff --git a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py index d714946d128284d60422779bc6286b64ee0244a7..2c944bfe4772b2f55ff16d1a3c726af3ee4c6e8f 100644 --- a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py @@ -52,6 +52,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_settings = self.__settings_handler.get_device_settings(device_obj) endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) endpoint_acls = self.__settings_handler.get_endpoint_acls(device_obj, endpoint_obj) @@ -59,7 +60,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): json_config_rules = setup_config_rules( service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, - settings, endpoint_settings, endpoint_acls) + settings, device_settings, endpoint_settings, endpoint_acls) if len(json_config_rules) > 0: del device_obj.device_config.config_rules[:] @@ -90,13 +91,14 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_settings = self.__settings_handler.get_device_settings(device_obj) endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) endpoint_name = endpoint_obj.name json_config_rules = teardown_config_rules( service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, - settings, endpoint_settings) + settings, device_settings, endpoint_settings) if len(json_config_rules) > 0: del device_obj.device_config.config_rules[:] diff --git a/src/slice/requirements.in b/src/slice/requirements.in index f2e7219e38a9b76bee5c1ae9e95544d1bc38065a..158355b697b14265c7ce965953c9d75b9bfdea65 100644 --- a/src/slice/requirements.in +++ b/src/slice/requirements.in @@ -13,7 +13,7 @@ # limitations under the License. #deepdiff==5.8.* -numpy==1.23.* +numpy<2.0.0 pandas==1.5.* questdb==1.0.1 requests==2.27.* diff --git a/src/telemetry/.gitlab-ci.yml b/src/telemetry/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..110a6490d20558c6589550be45b6432e500ba9d6 --- /dev/null +++ b/src/telemetry/.gitlab-ci.yml @@ -0,0 +1,203 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
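For reference, the `device_settings` resolved by `SettingsHandler.get_device_settings()` in the handler diff above and consumed by `setup_config_rules`/`teardown_config_rules` is a plain JSON dictionary. A minimal sketch of its shape, with keys taken from the `json_device_settings.get(...)` calls in this patch and purely illustrative values:

```python
# Illustrative only: key names mirror the getters in setup_config_rules();
# every value below is a placeholder, not taken from a real deployment.
device_settings = {
    'bgp_as'                           : 65000,
    'route_distinguisher'              : '60001:801',
    'router_id'                        : '10.95.0.10',
    'policy_AZ'                        : 'policy_a',        # import policy name
    'policy_ZA'                        : 'policy_b',        # export policy name
    'self_bgp_interface_name'          : 'eth-1/0/20',      # hypothetical interface
    'self_bgp_interface_address_ip'    : '10.95.100.10',
    'bgp_interface_address_prefix'     : 24,
    'self_bgp_sub_interface_index'     : 0,
    'neighbor_bgp_interface_address_ip': '10.95.100.1',
}
```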
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build telemetry: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + # This first build tags the builder resulting image to prevent being removed by dangling image removal command + # - docker buildx build -t "${IMAGE_NAME}-backend:${IMAGE_TAG}-builder" --target builder -f ./src/$IMAGE_NAME/backend/Dockerfile . + - docker buildx build -t "${IMAGE_NAME}-frontend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/frontend/Dockerfile . + - docker buildx build -t "${IMAGE_NAME}-backend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/backend/Dockerfile . + - docker tag "${IMAGE_NAME}-frontend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker tag "${IMAGE_NAME}-backend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/.gitlab-ci.yml + - src/$IMAGE_NAME/frontend/**/*.{py,in,yml} + - src/$IMAGE_NAME/frontend/Dockerfile + - src/$IMAGE_NAME/frontend/tests/*.py + - src/$IMAGE_NAME/backend/Dockerfile + - src/$IMAGE_NAME/backend/**/*.{py,in,yml} + - src/$IMAGE_NAME/backend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + +# Apply unit test to the component +unit_test telemetry-backend: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build telemetry + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + # - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi + - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + - docker pull 
"bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME-backend -d -p 30060:30060 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/backend/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG + - docker ps -a + - sleep 5 + - docker logs ${IMAGE_NAME}-backend + - > + docker exec -i ${IMAGE_NAME}-backend bash -c + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-backend_report.xml $IMAGE_NAME/backend/tests/test_*.py" + - docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force + - docker rm -f ${IMAGE_NAME}-backend + - docker rm -f zookeeper + - docker rm -f kafka + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/backend/**/*.{py,in,yml} + - src/$IMAGE_NAME/backend/Dockerfile + - src/$IMAGE_NAME/backend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/backend/tests/${IMAGE_NAME}-backend_report.xml + +# Apply unit test to the component +unit_test telemetry-frontend: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build telemetry + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi + - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - docker pull "cockroachdb/cockroach:latest-v22.2" + - docker volume create crdb + - > + docker run --name crdb -d --network=teraflowbridge -p 
26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    # - docker logs crdb
+    # - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      -e ALLOW_ANONYMOUS_LOGIN=yes
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 20 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP
+    # - docker logs zookeeper
+    # - docker logs kafka
+    - >
+      docker run --name $IMAGE_NAME-frontend -d -p 30050:30050
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/frontend/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
+    - docker logs ${IMAGE_NAME}-frontend
+    - >
+      docker exec -i ${IMAGE_NAME}-frontend bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_*.py"
+    - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker volume rm -f crdb
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+    - docker rm -f ${IMAGE_NAME}-frontend
+    - docker rm -f zookeeper
+    - docker rm -f kafka
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/frontend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/frontend/Dockerfile
+      - src/$IMAGE_NAME/frontend/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+    when: always
+    reports:
+      junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
\ No newline at end of file
diff --git a/src/telemetry/README.md b/src/telemetry/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..da43bd471c384ae9133871a097e94043f70ed7de
--- /dev/null
+++ b/src/telemetry/README.md
@@ -0,0 +1,10 @@
+# How to locally run and test Telemetry service
+
+### Pre-requisites
+The following requirements should be fulfilled before executing the Telemetry service.
+
+1. Verify that the [telemetry_frontend.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/telemetry_frontend.proto) file exists and that the gRPC stubs are generated successfully.
+2. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/telemetry_virenv.txt) installed successfully.
+3. Verify the creation of the required database and tables. The
+[DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/database/tests/managementDBtests.py) Python file lists the functions to create the tables and the database, while
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_manager/service/database/KpiEngine.py) contains the DB connection string; update it as per your deployment.
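For step 3, a minimal sketch using the database helpers introduced later in this change (assuming the `telemetry` package is importable in the active virtual environment and the `CRDB_*` settings are exported):

```python
# Hedged sketch: create and verify the telemetry database and tables locally.
from telemetry.database.Telemetry_DB import TelemetryDB

db = TelemetryDB()
db.create_database()  # creates the database if it does not exist yet
db.create_tables()    # creates the 'collector' table declared in TelemetryModel
db.verify_tables()    # logs whether the 'collector' table is present
```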
diff --git a/src/telemetry/__init__.py b/src/telemetry/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..234a1af6588c91f6a17f3963f69120cd6e2248d9
--- /dev/null
+++ b/src/telemetry/__init__.py
@@ -0,0 +1,15 @@
+
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/backend/Dockerfile b/src/telemetry/backend/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..eebfe24ab3ca457b9d05b02a07f4b28d6f196987
--- /dev/null
+++ b/src/telemetry/backend/Dockerfile
@@ -0,0 +1,69 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/telemetry/backend +WORKDIR /var/teraflow/telemetry/backend +COPY src/telemetry/backend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/telemetry/__init__.py telemetry/__init__.py +COPY src/telemetry/backend/. telemetry/backend/ + +# Start the service +ENTRYPOINT ["python", "-m", "telemetry.backend.service"] diff --git a/src/telemetry/backend/__init__.py b/src/telemetry/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/telemetry/backend/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/telemetry/backend/requirements.in b/src/telemetry/backend/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..e6a559be714faa31196206dbbdc53788506369b5 --- /dev/null +++ b/src/telemetry/backend/requirements.in @@ -0,0 +1,15 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +confluent-kafka==2.3.* diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py new file mode 100755 index 0000000000000000000000000000000000000000..6ab841238f446a2895cd163fab4b7eb05eaa3176 --- /dev/null +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -0,0 +1,239 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import time +import random +import logging +import threading +from typing import Any, Dict +# from common.proto.context_pb2 import Empty +from confluent_kafka import Producer as KafkaProducer +from confluent_kafka import Consumer as KafkaConsumer +from confluent_kafka import KafkaError +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +from common.method_wrappers.Decorator import MetricsPool +from common.tools.service.GenericGrpcService import GenericGrpcService + +LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('TelemetryBackend', 'backendService') + +class TelemetryBackendService(GenericGrpcService): + """ + Class listens for request on Kafka topic, fetches requested metrics from device. + Produces metrics on both RESPONSE and VALUE kafka topics. + """ + def __init__(self, cls_name : str = __name__) -> None: + LOGGER.info('Init TelemetryBackendService') + port = get_service_port_grpc(ServiceNameEnum.TELEMETRYBACKEND) + super().__init__(port, cls_name=cls_name) + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'backend', + 'auto.offset.reset' : 'latest'}) + self.running_threads = {} + + def install_servicers(self): + threading.Thread(target=self.RequestListener).start() + + def RequestListener(self): + """ + listener for requests on Kafka topic. 
+        """
+        consumer = self.kafka_consumer
+        consumer.subscribe([KafkaTopic.REQUEST.value])
+        while True:
+            receive_msg = consumer.poll(2.0)
+            if receive_msg is None:
+                continue
+            elif receive_msg.error():
+                if receive_msg.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(receive_msg.error()))
+                    break
+
+            collector = json.loads(receive_msg.value().decode('utf-8'))
+            collector_id = receive_msg.key().decode('utf-8')
+            LOGGER.debug('Received Collector: {:} - {:}'.format(collector_id, collector))
+            print('Received Collector: {:} - {:}'.format(collector_id, collector))
+
+            if collector['duration'] == -1 and collector['interval'] == -1:
+                self.TerminateCollectorBackend(collector_id)
+            else:
+                self.RunInitiateCollectorBackend(collector_id, collector)
+
+    def TerminateCollectorBackend(self, collector_id):
+        if collector_id in self.running_threads:
+            thread, stop_event = self.running_threads[collector_id]
+            stop_event.set()
+            thread.join()
+            print("Terminating backend (by StopCollector): Collector Id: ", collector_id)
+            del self.running_threads[collector_id]
+            self.GenerateCollectorResponse(collector_id, "-1", -1) # Termination confirmation to frontend.
+        else:
+            print('Backend collector {:} not found'.format(collector_id))
+
+    def RunInitiateCollectorBackend(self, collector_id: str, collector: dict):
+        stop_event = threading.Event()
+        thread = threading.Thread(target=self.InitiateCollectorBackend,
+                                  args=(collector_id, collector, stop_event))
+        self.running_threads[collector_id] = (thread, stop_event)
+        thread.start()
+
+    def InitiateCollectorBackend(self, collector_id, collector, stop_event):
+        """
+        Method receives collector request and initiates collector backend.
+        """
+        print("Initiating backend for collector: ", collector_id)
+        start_time = time.time()
+        while not stop_event.is_set():
+            if time.time() - start_time >= collector['duration']: # condition to terminate backend
+                print("Execution duration completed: Terminating backend: Collector Id: ", collector_id, " - ", time.time() - start_time)
+                self.GenerateCollectorResponse(collector_id, "-1", -1) # Termination confirmation to frontend.
+                break
+            self.ExtractKpiValue(collector_id, collector['kpi_id'])
+            time.sleep(collector['interval'])
+
+    def ExtractKpiValue(self, collector_id: str, kpi_id: str):
+        """
+        Method to extract kpi value.
+ """ + measured_kpi_value = random.randint(1,100) # TODO: To be extracted from a device + print ("Measured Kpi value: {:}".format(measured_kpi_value)) + # measured_kpi_value = self.fetch_node_exporter_metrics() # exporter extracted metric value against default KPI + self.GenerateCollectorResponse(collector_id, kpi_id , measured_kpi_value) + + def GenerateCollectorResponse(self, collector_id: str, kpi_id: str, measured_kpi_value: Any): + """ + Method to write kpi value on RESPONSE Kafka topic + """ + producer = self.kafka_producer + kpi_value : Dict = { + "kpi_id" : kpi_id, + "kpi_value" : measured_kpi_value + } + producer.produce( + KafkaTopic.RESPONSE.value, + key = collector_id, + value = json.dumps(kpi_value), + callback = self.delivery_callback + ) + producer.flush() + + def GenerateRawMetric(self, metrics: Any): + """ + Method writes raw metrics on VALUE Kafka topic + """ + producer = self.kafka_producer + some_metric : Dict = { + "some_id" : metrics + } + producer.produce( + KafkaTopic.VALUE.value, + key = 'raw', + value = json.dumps(some_metric), + callback = self.delivery_callback + ) + producer.flush() + + def delivery_callback(self, err, msg): + """ + Callback function to handle message delivery status. + Args: err (KafkaError): Kafka error object. + msg (Message): Kafka message object. + """ + if err: print(f'Message delivery failed: {err}') + # else: print(f'Message delivered to topic {msg.topic()}') + +# # ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter ----------- +# @staticmethod +# def fetch_single_node_exporter_metric(): +# """ +# Method to fetch metrics from Node Exporter. +# Returns: +# str: Metrics fetched from Node Exporter. +# """ +# KPI = "node_network_receive_packets_total" +# try: +# response = requests.get(EXPORTER_ENDPOINT) # type: ignore +# LOGGER.info("Request status {:}".format(response)) +# if response.status_code == 200: +# # print(f"Metrics fetched sucessfully...") +# metrics = response.text +# # Check if the desired metric is available in the response +# if KPI in metrics: +# KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI) +# # Extract the metric value +# if KPI_VALUE is not None: +# LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE)) +# print(f"Extracted value of {KPI} is: {KPI_VALUE}") +# return KPI_VALUE +# else: +# LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code)) +# # print(f"Failed to fetch metrics. Status code: {response.status_code}") +# return None +# except Exception as e: +# LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e)) +# # print(f"Failed to fetch metrics: {str(e)}") +# return None + +# @staticmethod +# def extract_metric_value(metrics, metric_name): +# """ +# Method to extract the value of a metric from the metrics string. +# Args: +# metrics (str): Metrics string fetched from Exporter. +# metric_name (str): Name of the metric to extract. +# Returns: +# float: Value of the extracted metric, or None if not found. 
+# """ +# try: +# # Find the metric line containing the desired metric name +# metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name)) +# # Split the line to extract the metric value +# metric_value = float(metric_line.split()[1]) +# return metric_value +# except StopIteration: +# print(f"Metric '{metric_name}' not found in the metrics.") +# return None + +# @staticmethod +# def stream_node_export_metrics_to_raw_topic(): +# try: +# while True: +# response = requests.get(EXPORTER_ENDPOINT) +# # print("Response Status {:} ".format(response)) +# # LOGGER.info("Response Status {:} ".format(response)) +# try: +# if response.status_code == 200: +# producerObj = KafkaProducer(PRODUCER_CONFIG) +# producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback) +# producerObj.flush() +# LOGGER.info("Produce to topic") +# else: +# LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code)) +# print(f"Didn't received expected response. Status code: {response.status_code}") +# return None +# time.sleep(15) +# except Exception as e: +# LOGGER.info("Failed to process response. Status code: {:}".format(e)) +# return None +# except Exception as e: +# LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e)) +# print(f"Failed to fetch metrics: {str(e)}") +# return None +# # ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter ----------- \ No newline at end of file diff --git a/src/telemetry/backend/service/__init__.py b/src/telemetry/backend/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/telemetry/backend/service/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/telemetry/backend/service/__main__.py b/src/telemetry/backend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ec9e191fd22e07da46f80214ade0ac516032433 --- /dev/null +++ b/src/telemetry/backend/service/__main__.py @@ -0,0 +1,56 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
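As a quick way to observe what `GenerateCollectorResponse` above writes, a throwaway consumer can tail the RESPONSE topic. A minimal sketch, assuming a reachable Kafka broker and the common Kafka helpers used in this patch:

```python
# Hedged sketch: tail the RESPONSE topic produced by GenerateCollectorResponse().
import json
from confluent_kafka import Consumer
from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

consumer = Consumer({
    'bootstrap.servers': KafkaConfig.get_kafka_address(),
    'group.id'         : 'response-tail',   # hypothetical consumer group
    'auto.offset.reset': 'earliest',
})
consumer.subscribe([KafkaTopic.RESPONSE.value])
msg = consumer.poll(10.0)
if msg is not None and msg.error() is None:
    # key is the collector_id; value is a JSON dict with 'kpi_id' and 'kpi_value'
    print(msg.key().decode('utf-8'), json.loads(msg.value().decode('utf-8')))
consumer.close()
```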
+ +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port +from .TelemetryBackendService import TelemetryBackendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + grpc_service = TelemetryBackendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/telemetry/backend/tests/__init__.py b/src/telemetry/backend/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02 --- /dev/null +++ b/src/telemetry/backend/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/telemetry/backend/tests/messagesBackend.py b/src/telemetry/backend/tests/messagesBackend.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf553eaaec41de7599b6723e31e4ca3f82cbcae --- /dev/null +++ b/src/telemetry/backend/tests/messagesBackend.py @@ -0,0 +1,15 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + diff --git a/src/telemetry/backend/tests/test_TelemetryBackend.py b/src/telemetry/backend/tests/test_TelemetryBackend.py new file mode 100644 index 0000000000000000000000000000000000000000..a2bbee540c3ce348ef52eceb0e776f48a68d94b1 --- /dev/null +++ b/src/telemetry/backend/tests/test_TelemetryBackend.py @@ -0,0 +1,38 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import threading
+from common.tools.kafka.Variables import KafkaTopic
+from telemetry.backend.service.TelemetryBackendService import TelemetryBackendService
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+###########################
+# Tests Implementation of Telemetry Backend
+###########################
+
+# --- "test_validate_kafka_topics" should be run before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+def test_RunRequestListener():
+    LOGGER.info('test_RunRequestListener')
+    TelemetryBackendServiceObj = TelemetryBackendService()
+    # The service exposes RequestListener (started by install_servicers); run it in a
+    # daemon thread here, as no RunRequestListener wrapper is defined in the class.
+    listener_thread = threading.Thread(target=TelemetryBackendServiceObj.RequestListener, daemon=True)
+    listener_thread.start()
+    assert listener_thread.is_alive()
diff --git a/src/telemetry/database/TelemetryEngine.py b/src/telemetry/database/TelemetryEngine.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c8620faf25e695e7f971bce78be9ad208a7701b
--- /dev/null
+++ b/src/telemetry/database/TelemetryEngine.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
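The `test_RunRequestListener` test above only starts the listener; to exercise it end to end, a collector request can be published on the REQUEST topic in the JSON shape that `RequestListener` decodes. A minimal sketch (all field values are arbitrary):

```python
# Hedged sketch: publish a collector request for RequestListener() to pick up.
import json, uuid
from confluent_kafka import Producer
from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

producer = Producer({'bootstrap.servers': KafkaConfig.get_kafka_address()})
collector_id = str(uuid.uuid4())
collector = {'kpi_id': str(uuid.uuid4()), 'duration': 10, 'interval': 2}  # seconds
producer.produce(KafkaTopic.REQUEST.value, key=collector_id, value=json.dumps(collector))
producer.flush()
# Sending duration == -1 and interval == -1 instead asks the backend to terminate
# the collector (see RequestListener above).
```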
+
+import logging, sqlalchemy
+from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+
+class TelemetryEngine:
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        crdb_uri = get_setting('CRDB_URI', default=None)
+        if crdb_uri is None:
+            CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
+            CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
+            CRDB_DATABASE  = "tfs-telemetry"    # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
+            CRDB_USERNAME  = get_setting('CRDB_USERNAME')
+            CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
+            CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
+            crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        try:
+            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
+            LOGGER.info('TelemetryDB initialized with DB URL: {:}'.format(crdb_uri))
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None # type: ignore
+        return engine
diff --git a/src/telemetry/database/TelemetryModel.py b/src/telemetry/database/TelemetryModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e71ce8138af39e51c80791dbd6683d855231d7b
--- /dev/null
+++ b/src/telemetry/database/TelemetryModel.py
@@ -0,0 +1,73 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy import Column, String, Float
+from sqlalchemy.orm import registry
+from common.proto import telemetry_frontend_pb2
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+
+class Collector(Base):
+    __tablename__ = 'collector'
+
+    collector_id        = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_id              = Column(UUID(as_uuid=False), nullable=False)
+    sampling_duration_s = Column(Float , nullable=False)
+    sampling_interval_s = Column(Float , nullable=False)
+    start_timestamp     = Column(Float , nullable=False)
+    end_timestamp       = Column(Float , nullable=False)
+
+    # helps in logging the information
+    def __repr__(self):
+        return (f"<Collector(collector_id='{self.collector_id}' , kpi_id='{self.kpi_id}', "
+                f"sampling_duration_s='{self.sampling_duration_s}', sampling_interval_s='{self.sampling_interval_s}',"
+                f"start_timestamp='{self.start_timestamp}' , end_timestamp='{self.end_timestamp}')>")
+
+    @classmethod
+    def ConvertCollectorToRow(cls, request):
+        """
+        Create an instance of Collector table rows from a request object.
+        Args: request: The request object containing collector gRPC message.
+        Returns: A row (an instance of Collector table) initialized with content of the request.
+        """
+        return cls(
+            collector_id        = request.collector_id.collector_id.uuid,
+            kpi_id              = request.kpi_id.kpi_id.uuid,
+            sampling_duration_s = request.duration_s,
+            sampling_interval_s = request.interval_s,
+            start_timestamp     = request.start_time.timestamp,
+            end_timestamp       = request.end_time.timestamp
+        )
+
+    @classmethod
+    def ConvertRowToCollector(cls, row):
+        """
+        Create and return a dictionary representation of a Collector table instance.
+        Args: row: The Collector table instance (row) containing the data.
+        Returns: collector gRPC message initialized with the content of a row.
+        """
+        response = telemetry_frontend_pb2.Collector()
+        response.collector_id.collector_id.uuid = row.collector_id
+        response.kpi_id.kpi_id.uuid             = row.kpi_id
+        response.duration_s                     = row.sampling_duration_s
+        response.interval_s                     = row.sampling_interval_s
+        response.start_time.timestamp           = row.start_timestamp
+        response.end_time.timestamp             = row.end_timestamp
+        return response
diff --git a/src/telemetry/database/Telemetry_DB.py b/src/telemetry/database/Telemetry_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..32acfd73a410a7bfddd6b487d0b1962afadb3842
--- /dev/null
+++ b/src/telemetry/database/Telemetry_DB.py
@@ -0,0 +1,137 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sqlalchemy_utils
+from sqlalchemy import inspect
+from sqlalchemy.orm import sessionmaker
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from common.method_wrappers.ServiceExceptions import (
+    OperationFailedException, AlreadyExistsException )
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "tfs_telemetry"
+
+class TelemetryDB:
+    def __init__(self):
+        self.db_engine = TelemetryEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise RuntimeError('Unable to get SQLAlchemy DB Engine...') # __init__ cannot return a value
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self):
+        if not sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.create_database(self.db_engine.url)
+            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
+
+    def drop_database(self) -> None:
+        if sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.drop_database(self.db_engine.url)
+
+    def create_tables(self):
+        try:
+            CollectorModel.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.debug("Tables created in the database: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e)))
+            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
+
+    def verify_tables(self):
+        try:
+            inspect_object = inspect(self.db_engine)
+            if(inspect_object.has_table('collector', None)):
+                LOGGER.info("Table exists in DB: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+# ----------------- CRUD METHODs ---------------------
+
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
+            return True
+        except Exception as e:
+            session.rollback()
+            if "psycopg2.errors.UniqueViolation" in str(e):
+                LOGGER.error(f"Unique key violation: {row.__class__.__name__} table. {str(e)}")
+                raise AlreadyExistsException(row.__class__.__name__, row,
+                                             extra_details=["Unique key violation: {:}".format(e)] )
+            else:
+                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+                raise OperationFailedException ("Insertion of row", extra_details=["unable to insert row {:}".format(e)])
+        finally:
+            session.close()
+
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}")
+                print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
+            raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
+        finally:
+            session.close()
+
+    def delete_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if record:
+                session.delete(record)
+                session.commit()
+                LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
+            else:
+                LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search)
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
+            raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+
+    def select_with_filter(self, model, filter_object):
+        session = self.Session()
+        try:
+            query = session.query(model)
+            # Apply filters based on the filter_object
+            if filter_object.kpi_id:
+                query = query.filter(CollectorModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))
+            result = query.all()
+            # query should be added to return all rows
+            if result:
+                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") # - Results: {result}
+            else:
+                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}")
+            return result
+        except Exception as e:
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
+            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
+        finally:
+            session.close()
+
diff --git
a/src/telemetry/database/__init__.py b/src/telemetry/database/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/telemetry/database/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/telemetry/database/__main__.py b/src/telemetry/database/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf553eaaec41de7599b6723e31e4ca3f82cbcae --- /dev/null +++ b/src/telemetry/database/__main__.py @@ -0,0 +1,15 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + diff --git a/src/telemetry/frontend/Dockerfile b/src/telemetry/frontend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7125d31fe74f7c44a52c2783369c2dc7a4a31160 --- /dev/null +++ b/src/telemetry/frontend/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
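Returning to `TelemetryDB.select_with_filter` above: the `filter_object` is the `CollectorFilter` gRPC message, whose repeated `kpi_id` field is inferred here from the `.in_()` expression in that method. A minimal sketch of a caller:

```python
# Hedged sketch: fetch collector rows matching one KPI id.
import uuid
from common.proto.telemetry_frontend_pb2 import CollectorFilter
from telemetry.database.Telemetry_DB import TelemetryDB
from telemetry.database.TelemetryModel import Collector

db  = TelemetryDB()
flt = CollectorFilter()
flt.kpi_id.add().kpi_id.uuid = str(uuid.uuid4())  # placeholder KPI id
for row in db.select_with_filter(Collector, flt):
    print(Collector.ConvertRowToCollector(row))
```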
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/telemetry/frontend +WORKDIR /var/teraflow/telemetry/frontend +COPY src/telemetry/frontend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/telemetry/__init__.py telemetry/__init__.py +COPY src/telemetry/frontend/. telemetry/frontend/ +COPY src/telemetry/database/. telemetry/database/ + +# Start the service +ENTRYPOINT ["python", "-m", "telemetry.frontend.service"] diff --git a/src/telemetry/frontend/__init__.py b/src/telemetry/frontend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..234a1af6588c91f6a17f3963f69120cd6e2248d9 --- /dev/null +++ b/src/telemetry/frontend/__init__.py @@ -0,0 +1,15 @@ + +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/telemetry/frontend/client/TelemetryFrontendClient.py b/src/telemetry/frontend/client/TelemetryFrontendClient.py new file mode 100644 index 0000000000000000000000000000000000000000..cd36ecd45933ad10758e408cf03c1bf834d27ba6 --- /dev/null +++ b/src/telemetry/frontend/client/TelemetryFrontendClient.py @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc + +from common.proto.context_pb2 import Empty +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceStub +from common.proto.telemetry_frontend_pb2 import Collector, CollectorId, CollectorFilter, CollectorList + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 10 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class TelemetryFrontendClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.TELEMETRYFRONTEND) + if not port: port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = TelemetryFrontendServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def StartCollector(self, request : Collector) -> CollectorId: # type: ignore + LOGGER.debug('StartCollector: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.StartCollector(request) + LOGGER.debug('StartCollector result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def StopCollector(self, request : CollectorId) -> Empty: # type: ignore + LOGGER.debug('StopCollector: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.StopCollector(request) + LOGGER.debug('StopCollector result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SelectCollectors(self, request : CollectorFilter) -> CollectorList: # type: ignore + LOGGER.debug('SelectCollectors: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SelectCollectors(request) + LOGGER.debug('SelectCollectors result: {:s}'.format(grpc_message_to_json_string(response))) + return response + diff --git a/src/telemetry/frontend/client/__init__.py 
b/src/telemetry/frontend/client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/telemetry/frontend/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/telemetry/frontend/requirements.in b/src/telemetry/frontend/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..231dc04e820387c95ffea72cbe67b9f0a9a0865a --- /dev/null +++ b/src/telemetry/frontend/requirements.in @@ -0,0 +1,19 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +confluent-kafka==2.3.* +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* diff --git a/src/telemetry/frontend/service/TelemetryFrontendService.py b/src/telemetry/frontend/service/TelemetryFrontendService.py new file mode 100644 index 0000000000000000000000000000000000000000..abd361aa0082e2de1d1f5fa7e81a336f3091af9a --- /dev/null +++ b/src/telemetry/frontend/service/TelemetryFrontendService.py @@ -0,0 +1,29 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
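+
+# The frontend service reuses the generic TFS gRPC bootstrap: GenericGrpcService
+# opens the server socket on the port resolved for TELEMETRYFRONTEND, and
+# install_servicers() below registers the servicer implementation on it.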
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.telemetry_frontend_pb2_grpc import add_TelemetryFrontendServiceServicer_to_server
+from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
+
+
+class TelemetryFrontendService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)
+        super().__init__(port, cls_name=cls_name)
+        self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl()
+
+    def install_servicers(self):
+        add_TelemetryFrontendServiceServicer_to_server(self.telemetry_frontend_servicer, self.server)
diff --git a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..b73d9fa952ee42aeb7adb8f3c0b2e4a3ba7f3e09
--- /dev/null
+++ b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
@@ -0,0 +1,198 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import threading
+from typing import Any, Dict
+import grpc
+import logging
+
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from common.proto.context_pb2 import Empty
+from common.proto.telemetry_frontend_pb2 import CollectorId, Collector, CollectorFilter, CollectorList
+from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceServicer
+
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.Telemetry_DB import TelemetryDB
+
+from confluent_kafka import Consumer as KafkaConsumer
+from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import KafkaError
+
+
+LOGGER = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('TelemetryFrontend', 'NBIgRPC')
+ACTIVE_COLLECTORS = []  # in-memory list of active collector IDs; could also be re-populated from the DB on restart
+
+
+class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer):
+    def __init__(self):
+        LOGGER.info('Init TelemetryFrontendService')
+        self.tele_db_obj = TelemetryDB()
+        self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                             'group.id'          : 'frontend',
+                                             'auto.offset.reset' : 'latest'})
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StartCollector(self,
+        request : Collector, grpc_context: grpc.ServicerContext # type: ignore
+    ) -> CollectorId: # type: ignore
+        LOGGER.info("gRPC message: {:}".format(request))
+        response = CollectorId()
+
+        # TODO: verify that the KPI ID exists in the KPI DB, or assume it is already registered?
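+        # Flow: persist the collector descriptor in the database first, then
+        # publish the start request on the Kafka REQUEST topic so a backend
+        # collector can pick it up; the collector ID is echoed back to the caller.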
+        self.tele_db_obj.add_row_to_db(
+            CollectorModel.ConvertCollectorToRow(request)
+        )
+        self.PublishStartRequestOnKafka(request)
+
+        response.collector_id.uuid = request.collector_id.collector_id.uuid
+        return response
+
+    def PublishStartRequestOnKafka(self, collector_obj):
+        """
+        Publishes a collector start request on the Kafka REQUEST topic.
+        """
+        collector_uuid = collector_obj.collector_id.collector_id.uuid
+        collector_to_generate : Dict = {
+            "kpi_id"  : collector_obj.kpi_id.kpi_id.uuid,
+            "duration": collector_obj.duration_s,
+            "interval": collector_obj.interval_s
+        }
+        self.kafka_producer.produce(
+            KafkaTopic.REQUEST.value,
+            key      = collector_uuid,
+            value    = json.dumps(collector_to_generate),
+            callback = self.delivery_callback
+        )
+        LOGGER.info("Collector Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_generate))
+        ACTIVE_COLLECTORS.append(collector_uuid)
+        self.kafka_producer.flush()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StopCollector(self,
+        request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore
+    ) -> Empty: # type: ignore
+        LOGGER.info("gRPC message: {:}".format(request))
+        try:
+            collector_to_delete = request.collector_id.uuid
+            self.tele_db_obj.delete_db_row_by_id(
+                CollectorModel, "collector_id", collector_to_delete
+            )
+            self.PublishStopRequestOnKafka(request)
+        except Exception as e:
+            LOGGER.error('Unable to delete collector. Error: {:}'.format(e))
+        return Empty()
+
+    def PublishStopRequestOnKafka(self, collector_id):
+        """
+        Publishes a collector stop request on the Kafka REQUEST topic.
+        """
+        collector_uuid = collector_id.collector_id.uuid
+        collector_to_stop : Dict = {
+            "kpi_id"  : collector_uuid,    # the stop message carries the collector UUID in this field
+            "duration": -1,                # duration/interval of -1 signal the backend to stop
+            "interval": -1
+        }
+        self.kafka_producer.produce(
+            KafkaTopic.REQUEST.value,
+            key      = collector_uuid,
+            value    = json.dumps(collector_to_stop),
+            callback = self.delivery_callback
+        )
+        LOGGER.info("Collector Stop Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_stop))
+        try:
+            ACTIVE_COLLECTORS.remove(collector_uuid)
+        except ValueError:
+            LOGGER.warning('Collector ID {:} not found in active collector list'.format(collector_uuid))
+        self.kafka_producer.flush()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectCollectors(self,
+        request : CollectorFilter, grpc_context: grpc.ServicerContext # type: ignore
+    ) -> CollectorList: # type: ignore
+        LOGGER.info("gRPC message: {:}".format(request))
+        response = CollectorList()
+
+        try:
+            rows = self.tele_db_obj.select_with_filter(CollectorModel, request)
+        except Exception as e:
+            LOGGER.error('Unable to apply filter on collectors. {:}'.format(e))
+            return response
+        try:
+            for row in rows:
+                collector_obj = CollectorModel.ConvertRowToCollector(row)
+                response.collector_list.append(collector_obj)
+            return response
+        except Exception as e:
+            LOGGER.error('Unable to process filter response. {:}'.format(e))
+            return response
+
+    # Note: this is a Kafka producer delivery callback, not an RPC method.
+    def delivery_callback(self, err, msg):
+        """
+        Callback function to handle message delivery status.
+        Args:
+            err (KafkaError): Kafka error object.
+            msg (Message): Kafka message object.
+ """ + if err: + LOGGER.debug('Message delivery failed: {:}'.format(err)) + print('Message delivery failed: {:}'.format(err)) + # else: + # LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) + # print('Message delivered to topic {:}'.format(msg.topic())) + + # ---------- Independent Method --------------- + # Listener method is independent of any method (same lifetime as service) + # continously listens for responses + def RunResponseListener(self): + threading.Thread(target=self.ResponseListener).start() + return True + + def ResponseListener(self): + """ + listener for response on Kafka topic. + """ + self.kafka_consumer.subscribe([KafkaTopic.RESPONSE.value]) + while True: + receive_msg = self.kafka_consumer.poll(2.0) + if receive_msg is None: + continue + elif receive_msg.error(): + if receive_msg.error().code() == KafkaError._PARTITION_EOF: + continue + else: + print("Consumer error: {}".format(receive_msg.error())) + break + try: + collector_id = receive_msg.key().decode('utf-8') + if collector_id in ACTIVE_COLLECTORS: + kpi_value = json.loads(receive_msg.value().decode('utf-8')) + self.process_response(collector_id, kpi_value['kpi_id'], kpi_value['kpi_value']) + else: + print(f"collector id does not match.\nRespone ID: '{collector_id}' --- Active IDs: '{ACTIVE_COLLECTORS}' ") + except Exception as e: + print(f"Error extarcting msg key or value: {str(e)}") + continue + + def process_response(self, collector_id: str, kpi_id: str, kpi_value: Any): + if kpi_id == "-1" and kpi_value == -1: + print ("Backend termination confirmation for collector id: ", collector_id) + else: + print ("KPI Value: Collector Id:", collector_id, ", Kpi Id:", kpi_id, ", Value:", kpi_value) diff --git a/src/telemetry/frontend/service/__init__.py b/src/telemetry/frontend/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/telemetry/frontend/service/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6c5dbcf2da6b6a074c2b8ee23791bc4896442f --- /dev/null +++ b/src/telemetry/frontend/service/__main__.py @@ -0,0 +1,56 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port +from .TelemetryFrontendService import TelemetryFrontendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + grpc_service = TelemetryFrontendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/telemetry/frontend/tests/Messages.py b/src/telemetry/frontend/tests/Messages.py new file mode 100644 index 0000000000000000000000000000000000000000..a0e93e8a121b9efaac83f7169419911c8ee6e3ea --- /dev/null +++ b/src/telemetry/frontend/tests/Messages.py @@ -0,0 +1,42 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
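+
+# Message factories for the frontend tests: start requests are built with random
+# UUIDs and randomized duration/interval values, while the fixed UUIDs below are
+# sample keys intended to match rows already present in the database.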
+ +import uuid +import random +from common.proto import telemetry_frontend_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.proto.kpi_manager_pb2 import KpiId + +# ----------------------- "2nd" Iteration -------------------------------- +def create_collector_id(): + _collector_id = telemetry_frontend_pb2.CollectorId() + # _collector_id.collector_id.uuid = str(uuid.uuid4()) + _collector_id.collector_id.uuid = "5d45f53f-d567-429f-9427-9196ac72ff0c" + return _collector_id + +def create_collector_request(): + _create_collector_request = telemetry_frontend_pb2.Collector() + _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4()) + _create_collector_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_collector_request.duration_s = float(random.randint(8, 16)) + _create_collector_request.interval_s = float(random.randint(2, 4)) + return _create_collector_request + +def create_collector_filter(): + _create_collector_filter = telemetry_frontend_pb2.CollectorFilter() + kpi_id_obj = KpiId() + # kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) + kpi_id_obj.kpi_id.uuid = "a7237fa3-caf4-479d-84b6-4d9f9738fb7f" + _create_collector_filter.kpi_id.append(kpi_id_obj) + return _create_collector_filter diff --git a/src/telemetry/frontend/tests/test_frontend.py b/src/telemetry/frontend/tests/test_frontend.py new file mode 100644 index 0000000000000000000000000000000000000000..9c3f9d3a8f545792eb2bb3a371c6c20664d24f69 --- /dev/null +++ b/src/telemetry/frontend/tests/test_frontend.py @@ -0,0 +1,151 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
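+
+# Harness sketch: the service is started in-process on 127.0.0.1 and the client
+# resolves it through the <SERVICE>_SERVICE_HOST / <SERVICE>_SERVICE_PORT_GRPC
+# environment variables set below, mirroring Kubernetes-style service discovery.
+# A typical session (hypothetical, for illustration):
+#   client = TelemetryFrontendClient()                                  # reads host/port from env
+#   collector_id = client.StartCollector(create_collector_request())   # returns CollectorId
+#   client.StopCollector(collector_id)                                 # returns Empty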
+
+import os
+import pytest
+import logging
+
+from common.Constants import ServiceNameEnum
+from common.proto.telemetry_frontend_pb2 import CollectorId, CollectorList
+from common.proto.context_pb2 import Empty
+from common.tools.kafka.Variables import KafkaTopic
+from common.Settings import (
+    get_service_port_grpc, get_env_var_name, ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC)
+
+from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
+from telemetry.frontend.service.TelemetryFrontendService import TelemetryFrontendService
+from telemetry.frontend.tests.Messages import (
+    create_collector_request, create_collector_id, create_collector_filter)
+from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
+
+
+###########################
+# Tests Setup
+###########################
+
+LOCAL_HOST = '127.0.0.1'
+
+TELEMETRY_FRONTEND_PORT = str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND))
+os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(TELEMETRY_FRONTEND_PORT)
+
+LOGGER = logging.getLogger(__name__)
+
+@pytest.fixture(scope='session')
+def telemetryFrontend_service():
+    LOGGER.info('Initializing TelemetryFrontendService...')
+
+    _service = TelemetryFrontendService()
+    _service.start()
+
+    # yield the server; when the test finishes, execution will resume to stop it
+    LOGGER.info('Yielding TelemetryFrontendService...')
+    yield _service
+
+    LOGGER.info('Terminating TelemetryFrontendService...')
+    _service.stop()
+
+    LOGGER.info('Terminated TelemetryFrontendService...')
+
+@pytest.fixture(scope='session')
+def telemetryFrontend_client(
+        telemetryFrontend_service : TelemetryFrontendService
+    ):
+    LOGGER.info('Initializing TelemetryFrontendClient...')
+    _client = TelemetryFrontendClient()
+
+    # yield the client; when the test finishes, execution will resume to close it
+    LOGGER.info('Yielding TelemetryFrontendClient...')
+    yield _client
+
+    LOGGER.info('Closing TelemetryFrontendClient...')
+    _client.close()
+
+    LOGGER.info('Closed TelemetryFrontendClient...')
+
+
+###########################
+# Tests Implementation of Telemetry Frontend
+###########################
+
+# ------- Re-structuring Test ---------
+# --- "test_validate_kafka_topics" should be run before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+# ----- core functionality tests -----
+def test_StartCollector(telemetryFrontend_client):
+    LOGGER.info(' >>> test_StartCollector START: <<< ')
+    response = telemetryFrontend_client.StartCollector(create_collector_request())
+    LOGGER.debug(str(response))
+    assert isinstance(response, CollectorId)
+
+def test_StopCollector(telemetryFrontend_client):
+    LOGGER.info(' >>> test_StopCollector START: <<< ')
+    response = telemetryFrontend_client.StopCollector(create_collector_id())
+    LOGGER.debug(str(response))
+    assert isinstance(response, Empty)
+
+def test_SelectCollectors(telemetryFrontend_client):
+    LOGGER.info(' >>> test_SelectCollectors START: <<< ')
+    response = telemetryFrontend_client.SelectCollectors(create_collector_filter())
+    LOGGER.debug(str(response))
+    assert isinstance(response, CollectorList)
+
+# ----- Non-gRPC method tests -----
+def test_RunResponseListener():
+    LOGGER.info(' >>> test_RunResponseListener START: <<< ')
+    TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl()
+    response = TelemetryFrontendServiceObj.RunResponseListener()    # invoked directly because "RunResponseListener" is not defined in telemetry_frontend.proto
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
+
+# ------- previous tests ----------------
+
+# def test_verify_db_and_table():
+#     LOGGER.info(' >>> test_verify_database_and_tables START: <<< ')
+#     _engine = TelemetryEngine.get_engine()
+#     managementDB.create_database(_engine)
+#     managementDB.create_tables(_engine)
+
+# def test_StartCollector(telemetryFrontend_client):
+#     LOGGER.info(' >>> test_StartCollector START: <<< ')
+#     response = telemetryFrontend_client.StartCollector(create_collector_request())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, CollectorId)
+
+# def test_run_kafka_listener():
+#     LOGGER.info(' >>> test_run_kafka_listener START: <<< ')
+#     name_mapping = NameMapping()
+#     TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping)
+#     response = TelemetryFrontendServiceObj.run_kafka_listener()    # Method "run_kafka_listener" is not defined in frontend.proto
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, bool)
+
+# def test_StopCollector(telemetryFrontend_client):
+#     LOGGER.info(' >>> test_StopCollector START: <<< ')
+#     _collector_id = telemetryFrontend_client.StartCollector(create_collector_request())
+#     time.sleep(3)    # wait a small amount before calling StopCollector()
+#     response = telemetryFrontend_client.StopCollector(_collector_id)
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, Empty)
+
+# def test_select_collectors(telemetryFrontend_client):
+#     LOGGER.info(' >>> test_select_collector requesting <<< ')
+#     response = telemetryFrontend_client.SelectCollectors(create_collector_filter())
+#     LOGGER.info('Received Rows after applying Filter: {:} '.format(response))
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, CollectorList)
\ No newline at end of file
diff --git a/src/telemetry/requirements.in b/src/telemetry/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..a0e78d2bfb7270b9664ad5ba810e2f213d887bf7
--- /dev/null
+++ b/src/telemetry/requirements.in
@@ -0,0 +1,24 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
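+
+# Pinned dependencies for the telemetry component; pip-compile resolves this
+# file into the concrete requirements.txt installed during the Docker build.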
+
+anytree==2.8.0
+APScheduler==3.10.1
+influx-line-protocol==0.1.4
+psycopg2-binary==2.9.3
+python-dateutil==2.8.2
+python-json-logger==2.0.2
+pytz==2024.1
+questdb==1.0.1
+requests==2.27.1
+xmltodict==0.12.0
\ No newline at end of file
diff --git a/src/telemetry/tests/messages.py b/src/telemetry/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..6919eecc62da0794869f334c4de85cb129fbab14
--- /dev/null
+++ b/src/telemetry/tests/messages.py
@@ -0,0 +1,80 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import uuid
+import random
+from common.proto import telemetry_frontend_pb2
+from common.proto import kpi_manager_pb2
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+
+
+def create_collector_request():
+    _create_collector_request = telemetry_frontend_pb2.Collector()
+    _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4())
+    _create_collector_request.kpi_id.kpi_id.uuid = '71d58648-bf47-49ac-996f-e63a9fbfead4'    # must exist as a primary key in the kpi table
+    # _create_collector_request.kpi_id.kpi_id.uuid = str(uuid.uuid4())
+    _create_collector_request.duration_s = float(random.randint(8, 16))
+    _create_collector_request.interval_s = float(random.randint(2, 4))
+    return _create_collector_request
+
+def create_kpi_request():
+    _create_kpi_request = kpi_manager_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4())
+    _create_kpi_request.kpi_description = 'KPI Description Test'
+    _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.service_id.service_uuid.uuid = 'SERV'
+    _create_kpi_request.device_id.device_uuid.uuid = 'DEV'
+    _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC'
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END'
+    _create_kpi_request.connection_id.connection_uuid.uuid = 'CON'
+    # _create_kpi_request.link_id.link_id.uuid = 'LNK'
+    return _create_kpi_request
+
+def create_kpi_id_request():
+    _create_kpi_id_request = kpi_manager_pb2.KpiId()
+    _create_kpi_id_request.kpi_id.uuid = '71d58648-bf47-49ac-996f-e63a9fbfead4'
+    return _create_kpi_id_request
+
+def create_collector_id_request():
+    _create_collector_id_request = telemetry_frontend_pb2.CollectorId()
+    _create_collector_id_request.collector_id.uuid = '71d58648-bf47-49ac-996f-e63a9fbfead4'
+    return _create_collector_id_request
+
+def create_kpi_filter_request():
+    # create a dict as follows: 'Key' = 'KpiModel' column name and 'Value' = filter to apply.
+    _create_kpi_filter_request = dict()
+    _create_kpi_filter_request['kpi_sample_type'] = 102
+    _create_kpi_filter_request['kpi_id'] = '3a17230d-8e95-4afb-8b21-6965481aee5a'
+    return _create_kpi_filter_request
+
+def create_collector_filter_request():
+    # create a dict as follows: 'Key' = 'CollectorModel' column name and 'Value' = filter to apply.
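+    # e.g. {'sampling_interval_s': 3.0} matches only collectors sampling every 3 seconds.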
+ _create_kpi_filter_request = dict() + _create_kpi_filter_request['sampling_interval_s'] = 3.0 + # _create_kpi_filter_request['kpi_id'] = '11e2c6c6-b507-40aa-ab3a-ffd41e7125f0' + return _create_kpi_filter_request + +def create_collector_model_object(): + # Create a new Collector instance + collector_to_insert = CollectorModel() + collector_to_insert.collector_id = str(uuid.uuid4()) + collector_to_insert.kpi_id = '3a17230d-8e95-4afb-8b21-6965481aee5a' + collector_to_insert.collector = "Test collector description" + collector_to_insert.sampling_duration_s = 15 + collector_to_insert.sampling_interval_s = 3 + collector_to_insert.start_timestamp = time.time() + collector_to_insert.end_timestamp = time.time() + return collector_to_insert \ No newline at end of file diff --git a/src/telemetry/tests/test_telemetryDB.py b/src/telemetry/tests/test_telemetryDB.py new file mode 100644 index 0000000000000000000000000000000000000000..c4976f8c2144fcdcad43a3e25d43091010de0d18 --- /dev/null +++ b/src/telemetry/tests/test_telemetryDB.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from telemetry.database.Telemetry_DB import TelemetryDB + +LOGGER = logging.getLogger(__name__) + +def test_verify_databases_and_tables(): + LOGGER.info('>>> test_verify_databases_and_tables : START <<< ') + TelemetryDBobj = TelemetryDB() + TelemetryDBobj.drop_database() + TelemetryDBobj.verify_tables() + TelemetryDBobj.create_database() + TelemetryDBobj.create_tables() + TelemetryDBobj.verify_tables() \ No newline at end of file diff --git a/src/tests/tools/mock_qkd_nodes/YangValidator.py b/src/tests/tools/mock_qkd_nodes/YangValidator.py new file mode 100644 index 0000000000000000000000000000000000000000..2056d5df64a1d841fc74c1be73aa6408051ab738 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/YangValidator.py @@ -0,0 +1,42 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
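+
+# Thin wrapper around libyang: the main YANG module and its dependencies are
+# loaded into a single context with all features enabled, and parse_to_dict()
+# validates an incoming data dict against the model before it is served.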
+
+import libyang, os
+from typing import Dict, List, Optional
+
+YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
+
+class YangValidator:
+    def __init__(self, main_module : str, dependency_modules : List[str]) -> None:
+        self._yang_context = libyang.Context(YANG_DIR)
+
+        self._yang_module = self._yang_context.load_module(main_module)
+        mods = [self._yang_context.load_module(mod) for mod in dependency_modules] + [self._yang_module]
+
+        for mod in mods:
+            mod.feature_enable_all()
+
+    def parse_to_dict(self, message : Dict) -> Dict:
+        dnode : Optional[libyang.DNode] = self._yang_module.parse_data_dict(
+            message, validate_present=True, validate=True, strict=True
+        )
+        if dnode is None: raise Exception('Unable to parse Message({:s})'.format(str(message)))
+        message = dnode.print_dict()
+        dnode.free()
+        return message
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
diff --git a/src/tests/tools/mock_qkd_nodes/mock.py b/src/tests/tools/mock_qkd_nodes/mock.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a606f6cac855fee9852f620c595908fbb3d36da
--- /dev/null
+++ b/src/tests/tools/mock_qkd_nodes/mock.py
@@ -0,0 +1,368 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
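+
+# In-memory mock of three ETSI QKD SDN nodes: RESTCONF-style GET/POST/PUT/PATCH
+# handlers operate on the `nodes` dict keyed by "<ip>:<port>", and every GET
+# response is validated against the etsi-qkd-sdn-node YANG model before replying.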
+ +import os +from flask import Flask, request +from YangValidator import YangValidator + +app = Flask(__name__) + + +yang_validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types']) + + +nodes = { + '127.0.0.1:11111': {'node': { + 'qkdn_id': '00000001-0000-0000-0000-000000000000', + }, + 'qkdn_capabilities': { + }, + 'qkd_applications': { + 'qkd_app': [ + { + 'app_id': '00000001-0001-0000-0000-000000000000', + 'client_app_id': [], + 'app_statistics': { + 'statistics': [] + }, + 'app_qos': { + }, + 'backing_qkdl_id': [] + } + ] + }, + 'qkd_interfaces': { + 'qkd_interface': [ + { + 'qkdi_id': '100', + 'qkdi_att_point': { + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '101', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'1001' + }, + 'qkdi_capabilities': { + } + } + ] + }, + 'qkd_links': { + 'qkd_link': [ + + ] + } + }, + + '127.0.0.1:22222': {'node': { + 'qkdn_id': '00000002-0000-0000-0000-000000000000', + }, + 'qkdn_capabilities': { + }, + 'qkd_applications': { + 'qkd_app': [ + { + 'app_id': '00000002-0001-0000-0000-000000000000', + 'client_app_id': [], + 'app_statistics': { + 'statistics': [] + }, + 'app_qos': { + }, + 'backing_qkdl_id': [] + } + ] + }, + 'qkd_interfaces': { + 'qkd_interface': [ + { + 'qkdi_id': '200', + 'qkdi_att_point': { + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '201', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'2001' + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '202', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'2002' + }, + 'qkdi_capabilities': { + } + } + ] + }, + 'qkd_links': { + 'qkd_link': [ + + ] + } + }, + + '127.0.0.1:33333': {'node': { + 'qkdn_id': '00000003-0000-0000-0000-000000000000', + }, + 'qkdn_capabilities': { + }, + 'qkd_applications': { + 'qkd_app': [ + { + 'app_id': '00000003-0001-0000-0000-000000000000', + 'client_app_id': [], + 'app_statistics': { + 'statistics': [] + }, + 'app_qos': { + }, + 'backing_qkdl_id': [] + } + ] + }, + 'qkd_interfaces': { + 'qkd_interface': [ + { + 'qkdi_id': '300', + 'qkdi_att_point': { + }, + 'qkdi_capabilities': { + } + }, + { + 'qkdi_id': '301', + 'qkdi_att_point': { + 'device':'127.0.0.1', + 'port':'3001' + }, + 'qkdi_capabilities': { + } + } + ] + }, + 'qkd_links': { + 'qkd_link': [ + + ] + } + } +} + + +def get_side_effect(url): + + steps = url.lstrip('https://').lstrip('http://').rstrip('/') + ip_port, _, _, header, *steps = steps.split('/') + + header_splitted = header.split(':') + + module = header_splitted[0] + assert(module == 'etsi-qkd-sdn-node') + + tree = {'qkd_node': nodes[ip_port]['node'].copy()} + + if len(header_splitted) == 1 or not header_splitted[1]: + value = nodes[ip_port].copy() + value.pop('node') + tree['qkd_node'].update(value) + + return tree, tree + + root = header_splitted[1] + assert(root == 'qkd_node') + + if not steps: + return tree, tree + + + endpoint, *steps = steps + + value = nodes[ip_port][endpoint] + + if not steps: + return_value = {endpoint:value} + tree['qkd_node'].update(return_value) + + return return_value, tree + + + + ''' + element, *steps = steps + + container, key = element.split('=') + + # value = value[container][key] + + if not steps: + return_value['qkd_node'][endpoint] = [value] + return return_value + + ''' + raise Exception('Url too long') + + + +def edit(from_dict, to_dict, create): + for key, value in from_dict.items(): + if isinstance(value, dict): + if key not in to_dict and create: + to_dict[key] = {} + edit(from_dict[key], to_dict[key], create) + elif isinstance(value, list): + 
to_dict[key].extend(value)
+        else:
+            to_dict[key] = value
+
+
+def edit_side_effect(url, json, create):
+    steps = url.lstrip('https://').lstrip('http://').rstrip('/')
+    ip_port, _, _, header, *steps = steps.split('/')
+
+    module, root = header.split(':')
+
+    assert(module == 'etsi-qkd-sdn-node')
+    assert(root == 'qkd_node')
+
+    if not steps:
+        edit(json, nodes[ip_port]['node'], create)
+        return
+
+    endpoint, *steps = steps
+
+    if not steps:
+        edit(json[endpoint], nodes[ip_port][endpoint], create)
+        return
+
+    '''
+    element, *steps = steps
+
+    container, key = element.split('=')
+
+    if not steps:
+        if key not in nodes[ip_port][endpoint][container] and create:
+            nodes[ip_port][endpoint][container][key] = {}
+
+        edit(json, nodes[ip_port][endpoint][container][key], create)
+        return 0
+    '''
+
+    raise Exception('Url too long')
+
+
+@app.get('/', defaults={'path': ''})
+@app.get("/<string:path>")
+@app.get('/<path:path>')
+def get(path):
+    msg, msg_validate = get_side_effect(request.base_url)
+    print(msg_validate)
+    yang_validator.parse_to_dict(msg_validate)
+    return msg
+
+
+@app.post('/', defaults={'path': ''})
+@app.post("/<string:path>")
+@app.post('/<path:path>')
+def post(path):
+    success = True
+    reason = ''
+    try:
+        edit_side_effect(request.base_url, request.json, True)
+    except Exception as e:
+        reason = str(e)
+        success = False
+    return {'success': success, 'reason': reason}
+
+
+@app.route('/', defaults={'path': ''}, methods=['PUT', 'PATCH'])
+@app.route("/<string:path>", methods=['PUT', 'PATCH'])
+@app.route('/<path:path>', methods=['PUT', 'PATCH'])
+def patch(path):
+    success = True
+    reason = ''
+    try:
+        edit_side_effect(request.base_url, request.json, False)
+    except Exception as e:
+        reason = str(e)
+        success = False
+    return {'success': success, 'reason': reason}
+
+
+# import json
+# from mock import requests
+# import pyangbind.lib.pybindJSON as enc
+# from pyangbind.lib.serialise import pybindJSONDecoder as dec
+# from yang.sbi.qkd.templates.etsi_qkd_sdn_node import etsi_qkd_sdn_node
+
+# module = etsi_qkd_sdn_node()
+# url = 'https://1.1.1.1/restconf/data/etsi-qkd-sdn-node:'
+
+# # Get node all info
+# z = requests.get(url).json()
+# var = dec.load_json(z, None, None, obj=module)
+# print(enc.dumps(var))
+
+
+# Reset module variable because it is already filled
+# module = etsi_qkd_sdn_node()
+
+# # Get node basic info
+# node = module.qkd_node
+# z = requests.get(url + 'qkd_node').json()
+# var = dec.load_json(z, None, None, obj=node)
+# print(enc.dumps(var))
+
+
+# # Get all apps
+# apps = node.qkd_applications
+# z = requests.get(url + 'qkd_node/qkd_applications').json()
+# var = dec.load_json(z, None, None, obj=apps)
+# print(enc.dumps(var))
+
+# # Edit app 0
+# app = apps.qkd_app['00000000-0001-0000-0000-000000000000']
+# app.client_app_id = 'id_0'
+# requests.put(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000000', json=json.loads(enc.dumps(app)))
+
+# # Create app 1
+# app = apps.qkd_app.add('00000000-0001-0000-0000-000000000001')
+# requests.post(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000001', json=json.loads(enc.dumps(app)))
+
+# # Get all apps
+# apps = node.qkd_applications
+# z = requests.get(url + 'qkd_node/qkd_applications').json()
+# var = dec.load_json(z, None, None, obj=apps)
+# print(enc.dumps(var))
diff --git a/src/tests/tools/mock_qkd_nodes/start.sh b/src/tests/tools/mock_qkd_nodes/start.sh
new file mode 100755
index
0000000000000000000000000000000000000000..b1bc56d5a7f90809e81c73a54803fb2dc11bacd9 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/start.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cd "$(dirname "$0")" + +killbg() { + for p in "${pids[@]}" ; do + kill "$p"; + done +} + +trap killbg EXIT +pids=() +flask --app mock run --host 0.0.0.0 --port 11111 & +pids+=($!) +flask --app mock run --host 0.0.0.0 --port 22222 & +pids+=($!) +flask --app mock run --host 0.0.0.0 --port 33333 diff --git a/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang new file mode 100644 index 0000000000000000000000000000000000000000..04bbd8a875445a9bcf19266f21b792439bf9005c --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang @@ -0,0 +1,326 @@ +/* Copyright 2022 ETSI +Licensed under the BSD-3 Clause (https://forge.etsi.org/legal-matters) */ + +module etsi-qkd-node-types { + + yang-version "1"; + + namespace "urn:etsi:qkd:yang:etsi-qkd-node-types"; + + prefix "etsi-qkdn-types"; + + organization "ETSI ISG QKD"; + + contact + "https://www.etsi.org/committee/qkd + vicente@fi.upm.es"; + + description + "This module contains the base types created for + the software-defined QKD node information models + specified in ETSI GS QKD 015 V2.1.1 + - QKD-TECHNOLOGY-TYPES + - QKDN-STATUS-TYPES + - QKD-LINK-TYPES + - QKD-ROLE-TYPES + - QKD-APP-TYPES + - Wavelength + "; + + revision "2022-01-30" { + description + "Refinement of the YANG model to make it compatible with the ETSI ISG QKD 018. 
Minor fixes."; + } + + revision "2020-09-30" { + description + "First definition based on initial requirement analysis."; + } + + identity QKD-TECHNOLOGY-TYPES { + description "Quantum Key Distribution System base technology types."; + } + + identity CV-QKD { + base QKD-TECHNOLOGY-TYPES; + description "Continuous Variable base technology."; + } + + identity DV-QKD { + base QKD-TECHNOLOGY-TYPES; + description "Discrete Variable base technology."; + } + + identity DV-QKD-COW { + base QKD-TECHNOLOGY-TYPES; + description "COW base technology."; + } + + identity DV-QKD-2Ws { + base QKD-TECHNOLOGY-TYPES; + description "2-Ways base technology."; + } + + typedef qkd-technology-types { + type identityref { + base QKD-TECHNOLOGY-TYPES; + } + description "This type represents the base technology types of the SD-QKD system."; + } + + identity QKDN-STATUS-TYPES { + description "Base identity used to identify the SD-QKD node status."; + } + + identity NEW { + base QKDN-STATUS-TYPES; + description "The QKD node is installed."; + } + + identity OPERATING { + base QKDN-STATUS-TYPES; + description "The QKD node is up."; + } + + identity DOWN { + base QKDN-STATUS-TYPES; + description "The QKD node is not working as expected."; + } + + identity FAILURE { + base QKDN-STATUS-TYPES; + description "The QKD node cannot be accessed by SDN controller with communication failure."; + } + + identity OUT { + base QKDN-STATUS-TYPES; + description "The QKD node is switched off and uninstalled."; + } + + typedef qkdn-status-types { + type identityref { + base QKDN-STATUS-TYPES; + } + description "This type represents the status of the SD-QKD node."; + } + + identity QKD-LINK-TYPES { + description "QKD key association link types."; + } + + identity VIRT { + base QKD-LINK-TYPES; + description "Virtual Link."; + } + + identity PHYS { + base QKD-LINK-TYPES; + description "Physical Link."; + } + + typedef qkd-link-types { + type identityref { + base QKD-LINK-TYPES; + } + description "This type represents the key association link type between two SD-QKD nodes."; + } + + identity QKD-ROLE-TYPES { + description "QKD Role Type."; + } + + identity TRANSMITTER { + base QKD-ROLE-TYPES; + description "QKD module working as transmitter."; + } + + identity RECEIVER { + base QKD-ROLE-TYPES; + description "QKD module working as receiver."; + } + + identity TRANSCEIVER { + base QKD-ROLE-TYPES; + description "QKD System that can work as a transmitter or receiver."; + } + + typedef qkd-role-types { + type identityref { + base QKD-ROLE-TYPES; + } + description "This type represents the working mode of a SD-QKD module."; + } + + identity QKD-APP-TYPES { + description "Application types."; + } + + identity CLIENT { + base QKD-APP-TYPES; + description "Application working as client."; + } + + identity INTERNAL { + base QKD-APP-TYPES; + description "Internal QKD node application."; + } + + typedef qkd-app-types { + type identityref { + base QKD-APP-TYPES; + } + description "This type represents the application class consuming key from SD-QKD nodes."; + } + + identity PHYS-PERF-TYPES { + description "Physical performance types."; + } + + identity QBER { + base PHYS-PERF-TYPES; + description "Quantum Bit Error Rate."; + } + + identity SNR { + base PHYS-PERF-TYPES; + description "Signal to Noise Ratio."; + } + + typedef phys-perf-types { + type identityref { + base PHYS-PERF-TYPES; + } + description "This type represents physical performance types."; + } + + identity LINK-STATUS-TYPES { + description "Status of the key association QKD link (physical 
and virtual)."; + } + + identity ACTIVE { + base LINK-STATUS-TYPES; + description "Link actively generating keys."; + } + + identity PASSIVE { + base LINK-STATUS-TYPES; + description "No key generation on key association QKD link but a pool of keys + are still available."; + } + + identity PENDING { + base LINK-STATUS-TYPES; + description "Waiting for activation and no keys are available."; + } + + identity OFF { + base LINK-STATUS-TYPES; + description "No key generation and no keys are available."; + } + + typedef link-status-types { + type identityref { + base LINK-STATUS-TYPES; + } + description "This type represents the status of a key association QKD link, both physical and virtual."; + } + + /// + + identity IFACE-STATUS-TYPES { + description "Interface Status."; + } + + identity ENABLED { + base IFACE-STATUS-TYPES; + description "The interfaces is up."; + } + + identity DISABLED { + base IFACE-STATUS-TYPES; + description "The interfaces is down."; + } + + identity FAILED { + base IFACE-STATUS-TYPES; + description "The interfaces has failed."; + } + + typedef iface-status-types { + type identityref { + base IFACE-STATUS-TYPES; + } + description "This type represents the status of a interface between a SD-QKD node and a SD-QKD module."; + } + + identity APP-STATUS-TYPES { + description "Application types."; + } + + identity ON { + base APP-STATUS-TYPES; + description "The application is on."; + } + + identity DISCONNECTED { + base APP-STATUS-TYPES; + description "The application is disconnected."; + } + + identity OUT-OF-TIME { + base APP-STATUS-TYPES; + description "The application is out of time."; + } + + identity ZOMBIE { + base APP-STATUS-TYPES; + description "The application is in a zombie state."; + } + + typedef app-status-types { + type identityref { + base APP-STATUS-TYPES; + } + description "This type represents the status of an application consuming key from SD-QKD nodes."; + } + + identity SEVERITY-TYPES { + description "Error/Failure severity levels."; + } + + identity MAJOR { + base SEVERITY-TYPES; + description "Major error/failure."; + } + + identity MINOR { + base SEVERITY-TYPES; + description "Minor error/failure."; + } + + typedef severity-types { + type identityref { + base SEVERITY-TYPES; + } + description "This type represents the Error/Failure severity levels."; + } + + typedef wavelength { + type string { + pattern "([1-9][0-9]{0,3})"; + } + description + "A WDM channel number (starting at 1). For example: 20"; + } + + //Pattern from "A Yang Data Model for WSON Optical Networks". + typedef wavelength-range-type { + type string { + pattern "([1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?" + + "(,[1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?)*)"; + } + description + "A list of WDM channel numbers (starting at 1) + in ascending order. 
For example: 1,12-20,40,50-80"; + } +} diff --git a/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang new file mode 100644 index 0000000000000000000000000000000000000000..d07004cdc5b558adc5a9c0b6acb32adac0d7cc11 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang @@ -0,0 +1,941 @@ +/* Copyright 2022 ETSI +Licensed under the BSD-3 Clause (https://forge.etsi.org/legal-matters) */ + +module etsi-qkd-sdn-node { + + yang-version "1"; + + namespace "urn:etsi:qkd:yang:etsi-qkd-node"; + + prefix "etsi-qkdn"; + + import ietf-yang-types { prefix "yang"; } + import ietf-inet-types { prefix "inet"; } + import etsi-qkd-node-types { prefix "etsi-qkdn-types"; } + + // meta + organization "ETSI ISG QKD"; + + contact + "https://www.etsi.org/committee/qkd + vicente@fi.upm.es"; + + description + "This module contains the groupings and containers composing + the software-defined QKD node information models + specified in ETSI GS QKD 015 V2.1.1"; + + revision "2022-01-30" { + description + "Refinement of the YANG model to make it compatible with the ETSI ISG QKD 018. Minor fixes."; + reference + "ETSI GS QKD 015 V2.1.1 (2022-01)"; + } + + revision "2020-09-30" { + description + "First definition based on initial requirement analysis."; + reference + "ETSI GS QKD 015 V1.1.1 (2021-03)"; + } + + grouping qkdn_id { + description "Grouping of qkdn_id leaf."; + + leaf qkdn_id { + type yang:uuid; + mandatory true; + description + "This value reflects the unique ID of the SD-QKD node."; + } + } + + grouping qkdn_version { + description "Grouping of qkdn_version leaf."; + + leaf qkdn_version { + type string; + description "Hardware or software version of the SD-QKD node."; + } + } + + grouping qkdn_location_id { + description "Grouping of qkdn_location_id leaf."; + + leaf qkdn_location_id { + type string; + default ""; + description + "This value enables the location of the secure + area that contains the SD-QKD node to be specified."; + } + } + + grouping qkdn_status { + description "Grouping of qkdn_status leaf."; + + leaf qkdn_status { + type etsi-qkdn-types:qkdn-status-types; + config false; + description "Status of the SD-QKD node."; + } + } + + grouping qkdn_capabilities { + description "Grouping of the capabilities of the SD-QKD node."; + + container qkdn_capabilities { + description "Capabilities of the SD-QKD node."; + + leaf link_stats_support { + type boolean; + default true; + description + "If true, this node exposes link-related statistics (secure key + generation rate-SKR, link consumption, status, QBER)."; + } + + leaf application_stats_support { + type boolean; + default true; + description "If true, this node exposes application related + statistics (application consumption, alerts)."; + } + + leaf key_relay_mode_enable { + type boolean; + default true; + description "If true, this node supports key relay (multi-hop) mode services."; + } + } + } + + grouping app_id { + description "Grouping of app_id leaf."; + + leaf app_id { + type yang:uuid; + description + "Unique ID that identifies a QKD application consisting of a set of entities + that are allowed to receive keys shared with each other from the SD-QKD nodes + they connect to. 
+                This value is similar to a key ID or key handle.";
+        }
+    }
+
+    grouping app_basic {
+        description "Grouping of app's basic parameters.";
+
+        uses app_id;
+
+        leaf app_status {
+            type etsi-qkdn-types:app-status-types;
+            config false;
+            description "Status of the application.";
+        }
+    }
+
+    grouping app_priority {
+        description "Grouping of app_priority leaf.";
+
+        leaf app_priority {
+            type uint32;
+            default 0;
+            description "Priority of the association/application;
+            it might be defined by the user but is usually
+            handled by a network administrator.";
+        }
+    }
+
+    grouping app_details {
+        description "Grouping of app's details parameters.";
+
+        leaf app_type {
+            type etsi-qkdn-types:qkd-app-types;
+            description "Type of the registered application. These
+            values, defined within the types module, can be client
+            (an external application requesting keys)
+            or internal (an application defined to maintain
+            the QKD node - e.g. multi-hop, authentication or
+            other encryption operations).";
+        }
+
+        leaf server_app_id {
+            type inet:uri;
+            description "ID that identifies the entity that initiated the
+            creation of the QKD application to receive keys shared with one
+            or more specified target entity identified by client_app_id.
+            It is a client in the interface to the SD-QKD node and the name
+            server_app_id reflects that it requested the QKD application to
+            be initiated.";
+        }
+
+        leaf-list client_app_id {
+            type inet:uri;
+            description "List of IDs that identifies the one or more
+            entities that are allowed to receive keys from SD-QKD
+            node(s) under the QKD application in addition to the
+            initiating entity identified by server_app_id.";
+        }
+
+        uses app_priority;
+    }
+
+    grouping local_qkdn_id {
+        description "Grouping of local_qkdn_id leaf.";
+
+        leaf local_qkdn_id {
+            type yang:uuid;
+            description "Unique ID of the local SD-QKD node which
+            is providing QKD keys to the local application.";
+        }
+    }
+
+    grouping app_time {
+        description "Grouping of app's time parameters.";
+
+        leaf creation_time {
+            type yang:date-and-time;
+            config false;
+            description "Date and time of the service creation.";
+        }
+
+        leaf expiration_time {
+            type yang:date-and-time;
+            description "Date and time of the service expiration.";
+        }
+    }
+
+    grouping app_statistics {
+        description "Grouping of app's statistic parameters.";
+
+        container app_statistics {
+            description "Statistical information relating to a specific statistic period of time.";
+
+            list statistics {
+                key "end_time";
+                config false;
+                description "List of statistics.";
+
+                leaf end_time {
+                    type yang:date-and-time;
+                    config false;
+                    description "End time for the statistic period.";
+                }
+
+                leaf start_time {
+                    type yang:date-and-time;
+                    config false;
+                    description "Start time for the statistic period.";
+                }
+
+                leaf consumed_bits {
+                    type uint32;
+                    config false;
+                    description "Consumed secret key amount (in bits) for a statistics collection period of time.";
+                }
+            }
+        }
+    }
+
+    grouping app_qos {
+        description "Grouping of app's basic qos parameters.";
+
+        container app_qos {
+            description "Requested Quality of Service.";
+
+            leaf max_bandwidth {
+                type uint32;
+                description "Maximum bandwidth (in bits per second) allowed for
+                this specific application. Exceeding this value will raise an
+                error from the local key store to the application.
+                This value might
+                be internally configured (or by an admin) with a default value.";
+            }
+
+            leaf min_bandwidth {
+                type uint32;
+                description "This value is an optional QoS parameter which
+                makes it possible to require a minimum key rate (in bits per second)
+                for the application.";
+            }
+
+            leaf jitter {
+                type uint32;
+                description "This value specifies the maximum jitter
+                (in msec) to be provided by the key delivery API for
+                applications requiring fast rekeying. This value can be
+                coordinated with the other QoS parameters to provide a wide enough
+                QoS definition.";
+            }
+
+            leaf ttl {
+                type uint32;
+                description "This value is used to specify the maximum time
+                (in seconds) that a key could be kept in the key store for
+                a given application without being used.";
+            }
+        }
+    }
+
+    grouping augmented_app_qos {
+        description "Grouping of app's detailed qos parameters.";
+
+        uses app_qos {
+            augment app_qos {
+                description "Augmentation of app's basic parameters with app's detailed qos parameters.";
+
+                leaf clients_shared_path_enable {
+                    type boolean;
+                    default false;
+                    description "If true, multiple clients for this
+                    application might share keys to reduce service
+                    impact (consumption).";
+                }
+
+                leaf clients_shared_keys_required {
+                    type boolean;
+                    default false;
+                    description "If true, multiple clients for this application
+                    might share keys to reduce service impact (consumption).";
+                }
+            }
+        }
+    }
+
+    grouping qkd_applications {
+        description "Grouping of the list of applications container.";
+
+        container qkd_applications {
+            description "List of applications container.";
+
+            list qkd_app {
+                key "app_id";
+                description "List of applications that are currently registered
+                in the SD-QKD node. Any entity consuming QKD-derived keys (either
+                for internal or external purposes) is considered an application.";
+
+                uses app_basic;
+
+                uses app_details;
+
+                uses app_time;
+
+                uses app_statistics;
+
+                uses augmented_app_qos;
+
+                leaf-list backing_qkdl_id {
+                    type yang:uuid;
+                    description "Unique ID of the key association link which is
+                    providing QKD keys to these applications.";
+                }
+
+                uses local_qkdn_id;
+
+                leaf remote_qkdn_id {
+                    type yang:uuid;
+                    description "Unique ID of the remote SD-QKD node which
+                    is providing QKD keys to the remote application.
+                    While it is unknown, the local SD-QKD node will not be able to
+                    provide keys to the local application.";
+                }
+            }
+        }
+    }
+
+    grouping qkdi_status {
+        description "Grouping of qkdi_status leaf.";
+
+        leaf qkdi_status {
+            type etsi-qkdn-types:iface-status-types;
+            config false;
+            description "Status of a QKD interface of the SD-QKD node.";
+        }
+    }
+
+    grouping qkdi_model {
+        description "Grouping of qkdi_model leaf.";
+
+        leaf qkdi_model {
+            type string;
+            description "Device model (vendor/device).";
+        }
+    }
+
+    grouping qkdi_type {
+        description "Grouping of qkdi_type leaf.";
+
+        leaf qkdi_type {
+            type etsi-qkdn-types:qkd-technology-types;
+            description "Interface type (QKD technology).";
+        }
+    }
+
+    grouping qkdi_att_point {
+        description "Grouping of the interface attachment points to an optical switch.";
+
+        container qkdi_att_point {
+            description "Interface attachment point to an optical switch.";
+
+            leaf device {
+                type string;
+                description "Unique ID of the optical switch (or
+                passive component) to which the interface is connected.";
+            }
+
+            leaf port {
+                type uint32;
+                description "Port ID of the device to which the interface
+                is connected.";
+            }
+        }
+    }
+
+    grouping qkdi_id {
+        description "Grouping of qkdi_id leaf.";
+
+        leaf qkdi_id {
+            type uint32;
+            description "Interface ID. It is described as a locally unique number,
+            which is globally unique when combined with the SD-QKD node ID.";
+        }
+    }
+
+    grouping qkd_interface_item {
+        description "Grouping of the interface parameters.";
+
+        uses qkdi_id;
+
+        uses qkdi_model;
+
+        uses qkdi_type;
+
+        uses qkdi_att_point;
+
+        container qkdi_capabilities {
+            description "Capabilities of the QKD system (interface).";
+
+            leaf role_support {
+                type etsi-qkdn-types:qkd-role-types;
+                description "QKD node support for key relay mode services.";
+            }
+
+            leaf wavelength_range {
+                type etsi-qkdn-types:wavelength-range-type;
+                description "Range of supported wavelengths (nm) (multiple
+                if it contains a tunable laser).";
+            }
+
+            leaf max_absorption {
+                type decimal64 {
+                    fraction-digits 3;
+                }
+                description "Maximum absorption supported (in dB).";
+            }
+        }
+    }
+
+    grouping qkd_interfaces {
+        description "Grouping of the list of interfaces.";
+
+        container qkd_interfaces {
+            description "List of interfaces container.";
+
+            list qkd_interface {
+                key "qkdi_id";
+                description "List of physical QKD modules in a secure location,
+                abstracted as interfaces of the SD-QKD node.";
+
+                uses qkd_interface_item;
+
+                uses qkdi_status;
+            }
+        }
+    }
+
+    grouping qkdl_id {
+        description "Grouping of qkdl_id leaf.";
+
+        leaf qkdl_id {
+            type yang:uuid;
+            description "Unique ID of the QKD link (key association).";
+        }
+    }
+
+    grouping qkdl_status {
+        description "Grouping of qkdl_status leaf.";
+
+        leaf qkdl_status {
+            type etsi-qkdn-types:link-status-types;
+            description "Status of the QKD key association link.";
+        }
+    }
+
+    grouping common_performance {
+        description "Grouping of common performance parameters.";
+
+        leaf expected_consumption {
+            type uint32;
+            config false;
+            description "Sum of all the application's bandwidth (in bits per
+            second) on this particular key association link.";
+        }
+
+        leaf skr {
+            type uint32;
+            config false;
+            description "Secret key rate generation (in bits per second)
+            of the key association link.";
+        }
+
+        leaf eskr {
+            type uint32;
+            config false;
+            description "Effective secret key rate (in bits per second) generation
+            of the key association link available after internal consumption.";
+        }
+    }
+
+    grouping physical_link_perf
+    {
+        description "Grouping of the list of physical performance parameters.";
+
+        list phys_perf {
+            key "perf_type";
+            config false;
+            description "List of physical performance parameters.";
+
+            leaf perf_type {
+                type etsi-qkdn-types:phys-perf-types;
+                config false;
+                description "Type of the physical performance value to be
+                exposed to the controller.";
+            }
+
+            leaf value {
+                type decimal64 {
+                    fraction-digits 3;
+                }
+                config false;
+                description "Numerical value for the performance parameter
+                type specified above.";
+            }
+        }
+    }
+
+    grouping virtual_link_spec {
+        description "Grouping of the virtual link's parameters.";
+
+        leaf virt_prev_hop {
+            type yang:uuid;
+            description "Previous hop in a multi-hop/virtual key
+            association link config.";
+        }
+
+        leaf-list virt_next_hop {
+            type yang:uuid;
+            description "Next hop(s) in a multi-hop/virtual key
+            association link config. Defined as a list for multicast
+            over shared sub-paths.";
+        }
+
+        leaf virt_bandwidth {
+            type uint32;
+            description "Required bandwidth (in bits per second) for that key association link.
+            Used to reserve bandwidth from the physical QKD links to support the virtual key
+            association link as an internal application.";
+        }
+    }
+
+    grouping physical_link_spec {
+        description "Grouping of the physical link's parameters.";
+
+        leaf phys_channel_att {
+            type decimal64 {
+                fraction-digits 3;
+            }
+            description "Expected attenuation on the quantum channel (in dB)
+            between the Source/qkd_node and Destination/qkd_node.";
+        }
+
+        leaf phys_wavelength {
+            type etsi-qkdn-types:wavelength;
+            description "Wavelength (in nm) to be used for the quantum channel.
+            If the interface is not tunable, this configuration could be bypassed.";
+        }
+
+        leaf phys_qkd_role {
+            type etsi-qkdn-types:qkd-role-types;
+            description "Transmitter/receiver mode for the QKD module.
+            If there is no multi-role support, this could be ignored.";
+        }
+    }
+
+    grouping qkd_links {
+        description "Grouping of the list of links.";
+
+        container qkd_links {
+            description "List of links container.";
+
+            list qkd_link {
+                key "qkdl_id";
+                description "List of (key association) links to other SD-QKD nodes in the network.
+                The links can be physical (direct quantum channel) or virtual (multi-hop)
+                connections doing key-relay through several nodes.";
+
+                uses qkdl_id;
+
+                uses qkdl_status;
+
+                leaf qkdl_enable {
+                    type boolean;
+                    default true;
+                    description "This value allows enabling or disabling the key generation
+                    process for a given link.";
+                }
+
+                container qkdl_local {
+                    description "Source (local) node of the SD-QKD link.";
+
+                    leaf qkdn_id {
+                        type yang:uuid;
+                        description "Unique ID of the local SD-QKD node.";
+                    }
+
+                    leaf qkdi_id {
+                        type uint32;
+                        description "Interface used to create the key association link.";
+                    }
+                }
+
+                container qkdl_remote {
+                    description "Destination (remote) unique SD-QKD node.";
+
+                    leaf qkdn_id {
+                        type yang:uuid;
+                        description "Unique ID of the remote SD-QKD node.
+                        This value is provided by the SDN controller when the key
+                        association link request arrives.";
+                    }
+
+                    leaf qkdi_id {
+                        type uint32;
+                        description "Interface used to create the link.";
+                    }
+                }
+
+                leaf qkdl_type {
+                    type etsi-qkdn-types:qkd-link-types;
+                    description "Key Association Link type: Virtual (multi-hop) or Direct.";
+                }
+
+                leaf-list qkdl_applications {
+                    type yang:uuid;
+                    description "Applications which are consuming keys from
+                    this key association link.";
+                }
+
+                uses virtual_link_spec {
+                    when "qkdl_type = 'etsi-qkd-node-types:VIRT'" {
+                        description "Virtual key association link specific configuration.";
+                    }
+                }
+
+                uses physical_link_spec {
+                    when "qkdl_type = 'etsi-qkd-node-types:PHYS'" {
+                        description "Physical key association link specific configuration.";
+                    }
+                }
+
+                container qkdl_performance {
+                    description "Container of link's performance parameters.";
+
+                    uses common_performance;
+
+                    uses physical_link_perf {
+                        when "../qkdl_type = 'PHYS'" {
+                            description "Performance of the specific physical link.";
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    container qkd_node {
+        description
+            "Top module describing a software-defined QKD node (SD-QKD node).";
+
+        uses qkdn_id;
+
+        uses qkdn_status;
+
+        uses qkdn_version;
+
+        uses qkdn_location_id;
+
+        uses qkdn_capabilities;
+
+        uses qkd_applications;
+
+        uses qkd_interfaces;
+
+        uses qkd_links;
+    }
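+    /* Editor's note (illustrative, not part of ETSI GS QKD 015): a minimal
+       RESTCONF/JSON instance of the qkd_node container, as a mock node could
+       serve it, might look as follows (the UUID is a placeholder):
+
+       {
+         "etsi-qkd-sdn-node:qkd_node": {
+           "qkdn_id": "00000001-0000-0000-0000-000000000000",
+           "qkdn_version": "0.1",
+           "qkd_applications": {"qkd_app": []},
+           "qkd_interfaces": {"qkd_interface": []},
+           "qkd_links": {"qkd_link": []}
+         }
+       }
+    */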
+    grouping message {
+        description "Grouping of message leaf.";
+
+        leaf message {
+            type string;
+            description "Placeholder for the message.";
+        }
+    }
+
+    grouping severity {
+        description "Grouping of severity leaf.";
+
+        leaf severity {
+            type etsi-qkdn-types:severity-types;
+            description "Placeholder for the severity.";
+        }
+    }
+
+    grouping reason {
+        description "Grouping of reason leaf.";
+
+        leaf reason {
+            type string;
+            description "Auxiliary parameter to include additional
+            information about the reason for link failure.";
+        }
+    }
+
+    notification sdqkdn_application_new {
+        description "Defined for the controller to detect new applications
+        requesting keys from a QKD node. This maps with the workflow shown
+        in clause 5.2 'QKD Application Registration'. Parameters such as
+        client and server app IDs, local QKD node identifier, priority and
+        QoS are sent in the notification.";
+
+        container qkd_application {
+            description "'sdqkdn_application_new' notification's qkd_application parameters.";
+
+            uses app_details;
+
+            uses local_qkdn_id;
+
+            uses augmented_app_qos;
+        }
+    }
+
+    notification sdqkdn_application_qos_update {
+        description "Notification that includes information about priority or
+        QoS changes on an existing and already registered application.";
+
+        container qkd_application {
+            description "'sdqkdn_application_qos_update' notification's qkd_application parameters.";
+
+            uses app_id;
+
+            uses augmented_app_qos;
+
+            uses app_priority;
+        }
+    }
+
+    notification sdqkdn_application_disconnected {
+        description "Includes the application identifier to inform that the
+        application is no longer registered and active in the QKD node.";
+
+        container qkd_application {
+            description "'sdqkdn_application_disconnected' notification's qkd_application parameters.";
+
+            uses app_id;
+        }
+    }
+
+    notification sdqkdn_interface_new {
+        description "Includes all the information about the new QKD system
+        installed in the secure location of a given QKD node.";
+
+        container qkd_interface {
+            description "'sdqkdn_interface_new' notification's qkd_interface parameters.";
+
+            uses qkd_interface_item;
+        }
+    }
+
+    notification sdqkdn_interface_down {
+        description "Identifies an interface within a QKD node which is not
+        working as expected, allowing additional information to be included
+        in a 'reason' string field.";
+
+        container qkd_interface {
+            description "'sdqkdn_interface_down' notification's qkd_interface parameters.";
+
+            uses qkdi_id;
+
+            uses reason;
+        }
+    }
+
+    notification sdqkdn_interface_out {
+        description "Contains the ID of an interface which is switched off and
+        uninstalled from a QKD node. This information can be gathered from this
+        notification or from regular polling from the controller's side.";
+
+        container qkd_interface {
+            description "'sdqkdn_interface_out' notification's qkd_interface parameters.";
+
+            uses qkdi_id;
+        }
+    }
+
+    notification sdqkdn_link_down {
+        description "As in the interface down event, this notification contains
+        the identifier of a given link which has gone down unexpectedly.
+        In addition, further information can be sent in the 'reason' field.";
+
+        container qkd_link {
+            description "'sdqkdn_link_down' notification's qkd_link parameters.";
+
+            uses qkdl_id;
+
+            uses reason;
+        }
+    }
+
+    notification sdqkdn_link_perf_update {
+        description "This notification reports any major
+        modification in the performance of an active link. The identifier
+        of the link is sent together with the performance parameters of the link.";
+
+        container qkd_link {
+            description "'sdqkdn_link_perf_update' notification's qkd_link parameters.";
+
+            uses qkdl_id;
+
+            container performance {
+                description "'sdqkdn_link_perf_update' notification's performance parameters.";
+
+                uses common_performance;
+
+                uses physical_link_perf;
+            }
+        }
+    }
+
+    notification sdqkdn_link_overloaded {
+        description "This notification is sent when the link cannot cope with the demand.
+        The link identifier is sent with the expected consumption and
+        general performance parameters.";
+
+        container qkd_link {
+            description "'sdqkdn_link_overloaded' notification's qkd_link parameters.";
+
+            uses qkdl_id;
+
+            container performance {
+                description "'sdqkdn_link_overloaded' notification's performance parameters.";
+
+                uses common_performance;
+            }
+        }
+    }
+
+    notification alarm {
+        description "'alarm' notification.";
+
+        container link {
+            description "'alarm' notification's link parameters.";
+
+            uses qkdl_id;
+
+            uses qkdl_status;
+
+            uses message;
+
+            uses severity;
+        }
+
+        container interface {
+            description "'alarm' notification's interface parameters.";
+
+            uses qkdi_id;
+
+            uses qkdi_status;
+
+            uses message;
+
+            uses severity;
+        }
+
+        container application {
+            description "'alarm' notification's application parameters.";
+
+            uses app_basic;
+
+            uses message;
+
+            uses severity;
+        }
+    }
+
+    notification event {
+        description "'event' notification.";
+
+        container link {
+            description "'event' notification's link parameters.";
+
+            uses qkdl_id;
+
+            uses qkdl_status;
+
+            uses message;
+
+            uses severity;
+        }
+
+        container interface {
+            description "'event' notification's interface parameters.";
+
+            uses qkdi_id;
+
+            uses qkdi_status;
+
+            uses message;
+
+            uses severity;
+        }
+
+        container application {
+            description "'event' notification's application parameters.";
+
+            uses app_basic;
+
+            uses message;
+
+            uses severity;
+        }
+    }
+}
diff --git a/src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang b/src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang
new file mode 100644
index 0000000000000000000000000000000000000000..eacefb6363de1beb543567a0fa705571b7dc57a2
--- /dev/null
+++ b/src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang
@@ -0,0 +1,458 @@
+module ietf-inet-types {
+
+  namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+  prefix "inet";
+
+  organization
+    "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+  contact
+    "WG Web:   <http://tools.ietf.org/wg/netmod/>
+     WG List:  <mailto:netmod@ietf.org>
+
+     WG Chair: David Kessens
+               <mailto:david.kessens@nsn.com>
+
+     WG Chair: Juergen Schoenwaelder
+               <mailto:j.schoenwaelder@jacobs-university.de>
+
+     Editor:   Juergen Schoenwaelder
+               <mailto:j.schoenwaelder@jacobs-university.de>";
+
+  description
+    "This module contains a collection of generally useful derived
+     YANG data types for Internet addresses and related things.
+
+     Copyright (c) 2013 IETF Trust and the persons identified as
+     authors of the code.  All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD License
+     set forth in Section 4.c of the IETF Trust's Legal Provisions
+     Relating to IETF Documents
+     (http://trustee.ietf.org/license-info).
+ + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - ip-address-no-zone + - ipv4-address-no-zone + - ipv6-address-no-zone"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of types related to protocol fields ***/ + + typedef ip-version { + type enumeration { + enum unknown { + value "0"; + description + "An unknown or unspecified version of the Internet + protocol."; + } + enum ipv4 { + value "1"; + description + "The IPv4 protocol as defined in RFC 791."; + } + enum ipv6 { + value "2"; + description + "The IPv6 protocol as defined in RFC 2460."; + } + } + description + "This value represents the version of the IP protocol. + + In the value set and its semantics, this type is equivalent + to the InetVersion textual convention of the SMIv2."; + reference + "RFC 791: Internet Protocol + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + typedef dscp { + type uint8 { + range "0..63"; + } + description + "The dscp type represents a Differentiated Services Code Point + that may be used for marking packets in a traffic stream. + In the value set and its semantics, this type is equivalent + to the Dscp textual convention of the SMIv2."; + reference + "RFC 3289: Management Information Base for the Differentiated + Services Architecture + RFC 2474: Definition of the Differentiated Services Field + (DS Field) in the IPv4 and IPv6 Headers + RFC 2780: IANA Allocation Guidelines For Values In + the Internet Protocol and Related Headers"; + } + + typedef ipv6-flow-label { + type uint32 { + range "0..1048575"; + } + description + "The ipv6-flow-label type represents the flow identifier or Flow + Label in an IPv6 packet header that may be used to + discriminate traffic flows. + + In the value set and its semantics, this type is equivalent + to the IPv6FlowLabel textual convention of the SMIv2."; + reference + "RFC 3595: Textual Conventions for IPv6 Flow Label + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification"; + } + + typedef port-number { + type uint16 { + range "0..65535"; + } + description + "The port-number type represents a 16-bit port number of an + Internet transport-layer protocol such as UDP, TCP, DCCP, or + SCTP. Port numbers are assigned by IANA. A current list of + all assignments is available from <http://www.iana.org/>. + + Note that the port number value zero is reserved by IANA. In + situations where the value zero does not make sense, it can + be excluded by subtyping the port-number type. + In the value set and its semantics, this type is equivalent + to the InetPortNumber textual convention of the SMIv2."; + reference + "RFC 768: User Datagram Protocol + RFC 793: Transmission Control Protocol + RFC 4960: Stream Control Transmission Protocol + RFC 4340: Datagram Congestion Control Protocol (DCCP) + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + /*** collection of types related to autonomous systems ***/ + + typedef as-number { + type uint32; + description + "The as-number type represents autonomous system numbers + which identify an Autonomous System (AS). 
An AS is a set + of routers under a single technical administration, using + an interior gateway protocol and common metrics to route + packets within the AS, and using an exterior gateway + protocol to route packets to other ASes. IANA maintains + the AS number space and has delegated large parts to the + regional registries. + + Autonomous system numbers were originally limited to 16 + bits. BGP extensions have enlarged the autonomous system + number space to 32 bits. This type therefore uses an uint32 + base type without a range restriction in order to support + a larger autonomous system number space. + + In the value set and its semantics, this type is equivalent + to the InetAutonomousSystemNumber textual convention of + the SMIv2."; + reference + "RFC 1930: Guidelines for creation, selection, and registration + of an Autonomous System (AS) + RFC 4271: A Border Gateway Protocol 4 (BGP-4) + RFC 4001: Textual Conventions for Internet Network Addresses + RFC 6793: BGP Support for Four-Octet Autonomous System (AS) + Number Space"; + } + + /*** collection of types related to IP addresses and hostnames ***/ + + typedef ip-address { + type union { + type inet:ipv4-address; + type inet:ipv6-address; + } + description + "The ip-address type represents an IP address and is IP + version neutral. The format of the textual representation + implies the IP version. This type supports scoped addresses + by allowing zone identifiers in the address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '(%[\p{N}\p{L}]+)?'; + } + description + "The ipv4-address type represents an IPv4 address in + dotted-quad notation. The IPv4 address may include a zone + index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format for the zone index is the numerical + format"; + } + + typedef ipv6-address { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(%[\p{N}\p{L}]+)?'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(%.+)?'; + } + description + "The ipv6-address type represents an IPv6 address in full, + mixed, shortened, and shortened-mixed notation. The IPv6 + address may include a zone index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format of IPv6 addresses uses the textual + representation defined in Section 4 of RFC 5952. 
The + canonical format for the zone index is the numerical + format as described in Section 11.2 of RFC 4007."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-address-no-zone { + type union { + type inet:ipv4-address-no-zone; + type inet:ipv6-address-no-zone; + } + description + "The ip-address-no-zone type represents an IP address and is + IP version neutral. The format of the textual representation + implies the IP version. This type does not support scoped + addresses since it does not allow zone identifiers in the + address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address-no-zone { + type inet:ipv4-address { + pattern '[0-9\.]*'; + } + description + "An IPv4 address without a zone index. This type, derived from + ipv4-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + } + + typedef ipv6-address-no-zone { + type inet:ipv6-address { + pattern '[0-9a-fA-F:\.]*'; + } + description + "An IPv6 address without a zone index. This type, derived from + ipv6-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-prefix { + type union { + type inet:ipv4-prefix; + type inet:ipv6-prefix; + } + description + "The ip-prefix type represents an IP prefix and is IP + version neutral. The format of the textual representations + implies the IP version."; + } + + typedef ipv4-prefix { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '/(([0-9])|([1-2][0-9])|(3[0-2]))'; + } + description + "The ipv4-prefix type represents an IPv4 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 32. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The canonical format of an IPv4 prefix has all bits of + the IPv4 address set to zero that are not part of the + IPv4 prefix."; + } + + typedef ipv6-prefix { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(/.+)'; + } + + description + "The ipv6-prefix type represents an IPv6 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 128. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The IPv6 address should have all bits that do not belong + to the prefix set to zero. + + The canonical format of an IPv6 prefix has all bits of + the IPv6 address set to zero that are not part of the + IPv6 prefix. 
Furthermore, the IPv6 address is represented + as defined in Section 4 of RFC 5952."; + reference + "RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + /*** collection of domain name and URI types ***/ + + typedef domain-name { + type string { + pattern + '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*' + + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)' + + '|\.'; + length "1..253"; + } + description + "The domain-name type represents a DNS domain name. The + name SHOULD be fully qualified whenever possible. + + Internet domain names are only loosely specified. Section + 3.5 of RFC 1034 recommends a syntax (modified in Section + 2.1 of RFC 1123). The pattern above is intended to allow + for current practice in domain name use, and some possible + future expansion. It is designed to hold various types of + domain names, including names used for A or AAAA records + (host names) and other records, such as SRV records. Note + that Internet host names have a stricter syntax (described + in RFC 952) than the DNS recommendations in RFCs 1034 and + 1123, and that systems that want to store host names in + schema nodes using the domain-name type are recommended to + adhere to this stricter standard to ensure interoperability. + + The encoding of DNS names in the DNS protocol is limited + to 255 characters. Since the encoding consists of labels + prefixed by a length bytes and there is a trailing NULL + byte, only 253 characters can appear in the textual dotted + notation. + + The description clause of schema nodes using the domain-name + type MUST describe when and how these names are resolved to + IP addresses. Note that the resolution of a domain-name value + may require to query multiple DNS records (e.g., A for IPv4 + and AAAA for IPv6). The order of the resolution process and + which DNS record takes precedence can either be defined + explicitly or may depend on the configuration of the + resolver. + + Domain-name values use the US-ASCII encoding. Their canonical + format uses lowercase US-ASCII characters. Internationalized + domain names MUST be A-labels as per RFC 5890."; + reference + "RFC 952: DoD Internet Host Table Specification + RFC 1034: Domain Names - Concepts and Facilities + RFC 1123: Requirements for Internet Hosts -- Application + and Support + RFC 2782: A DNS RR for specifying the location of services + (DNS SRV) + RFC 5890: Internationalized Domain Names in Applications + (IDNA): Definitions and Document Framework"; + } + + typedef host { + type union { + type inet:ip-address; + type inet:domain-name; + } + description + "The host type represents either an IP address or a DNS + domain name."; + } + + typedef uri { + type string; + description + "The uri type represents a Uniform Resource Identifier + (URI) as defined by STD 66. + + Objects using the uri type MUST be in US-ASCII encoding, + and MUST be normalized as described by RFC 3986 Sections + 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary + percent-encoding is removed, and all case-insensitive + characters are set to lowercase except for hexadecimal + digits, which are normalized to uppercase as described in + Section 6.2.2.1. + + The purpose of this normalization is to help provide + unique URIs. Note that this normalization is not + sufficient to provide uniqueness. Two URIs that are + textually distinct after this normalization may still be + equivalent. + + Objects using the uri type may restrict the schemes that + they permit. 
For example, 'data:' and 'urn:' schemes + might not be appropriate. + + A zero-length URI is not a valid URI. This can be used to + express 'URI absent' where required. + + In the value set and its semantics, this type is equivalent + to the Uri SMIv2 textual convention defined in RFC 5017."; + reference + "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax + RFC 3305: Report from the Joint W3C/IETF URI Planning Interest + Group: Uniform Resource Identifiers (URIs), URLs, + and Uniform Resource Names (URNs): Clarifications + and Recommendations + RFC 5017: MIB Textual Conventions for Uniform Resource + Identifiers (URIs)"; + } + +} diff --git a/src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang b/src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang new file mode 100644 index 0000000000000000000000000000000000000000..ee58fa3ab0042120d5607b8713d21fa0ba845895 --- /dev/null +++ b/src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang @@ -0,0 +1,474 @@ +module ietf-yang-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types"; + prefix "yang"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: <http://tools.ietf.org/wg/netmod/> + WG List: <mailto:netmod@ietf.org> + + WG Chair: David Kessens + <mailto:david.kessens@nsn.com> + + WG Chair: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de> + + Editor: Juergen Schoenwaelder + <mailto:j.schoenwaelder@jacobs-university.de>"; + + description + "This module contains a collection of generally useful derived + YANG data types. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - yang-identifier + - hex-string + - uuid + - dotted-quad"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of counter and gauge types ***/ + + typedef counter32 { + type uint32; + description + "The counter32 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter32 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter32 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter32. 
+ + In the value set and its semantics, this type is equivalent + to the Counter32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter32 { + type yang:counter32; + default "0"; + description + "The zero-based-counter32 type represents a counter32 + that has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter32 textual convention of the SMIv2."; + reference + "RFC 4502: Remote Network Monitoring Management Information + Base Version 2"; + } + + typedef counter64 { + type uint64; + description + "The counter64 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter64 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter64 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter64. + + In the value set and its semantics, this type is equivalent + to the Counter64 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter64 { + type yang:counter64; + default "0"; + description + "The zero-based-counter64 type represents a counter64 that + has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. 
+ + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter64 textual convention of the SMIv2."; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + typedef gauge32 { + type uint32; + description + "The gauge32 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^32-1 (4294967295 decimal), and + the minimum value cannot be smaller than 0. The value of + a gauge32 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge32 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the Gauge32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef gauge64 { + type uint64; + description + "The gauge64 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^64-1 (18446744073709551615), and + the minimum value cannot be smaller than 0. The value of + a gauge64 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge64 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the CounterBasedGauge64 SMIv2 textual convention defined + in RFC 2856"; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + /*** collection of identifier-related types ***/ + + typedef object-identifier { + type string { + pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))' + + '(\.(0|([1-9]\d*)))*'; + } + description + "The object-identifier type represents administratively + assigned names in a registration-hierarchical-name tree. + + Values of this type are denoted as a sequence of numerical + non-negative sub-identifier values. Each sub-identifier + value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers + are separated by single dots and without any intermediate + whitespace. + + The ASN.1 standard restricts the value space of the first + sub-identifier to 0, 1, or 2. Furthermore, the value space + of the second sub-identifier is restricted to the range + 0 to 39 if the first sub-identifier is 0 or 1. Finally, + the ASN.1 standard requires that an object identifier + has always at least two sub-identifiers. The pattern + captures these restrictions. + + Although the number of sub-identifiers is not limited, + module designers should realize that there may be + implementations that stick with the SMIv2 limit of 128 + sub-identifiers. + + This type is a superset of the SMIv2 OBJECT IDENTIFIER type + since it is not restricted to 128 sub-identifiers. 
Hence, + this type SHOULD NOT be used to represent the SMIv2 OBJECT + IDENTIFIER type; the object-identifier-128 type SHOULD be + used instead."; + reference + "ISO9834-1: Information technology -- Open Systems + Interconnection -- Procedures for the operation of OSI + Registration Authorities: General procedures and top + arcs of the ASN.1 Object Identifier tree"; + } + + typedef object-identifier-128 { + type object-identifier { + pattern '\d*(\.\d*){1,127}'; + } + description + "This type represents object-identifiers restricted to 128 + sub-identifiers. + + In the value set and its semantics, this type is equivalent + to the OBJECT IDENTIFIER type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef yang-identifier { + type string { + length "1..max"; + pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*'; + pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*'; + } + description + "A YANG identifier string as defined by the 'identifier' + rule in Section 12 of RFC 6020. An identifier must + start with an alphabetic character or an underscore + followed by an arbitrary sequence of alphabetic or + numeric characters, underscores, hyphens, or dots. + + A YANG identifier MUST NOT start with any possible + combination of the lowercase or uppercase character + sequence 'xml'."; + reference + "RFC 6020: YANG - A Data Modeling Language for the Network + Configuration Protocol (NETCONF)"; + } + + /*** collection of types related to date and time***/ + + typedef date-and-time { + type string { + pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?' + + '(Z|[\+\-]\d{2}:\d{2})'; + } + description + "The date-and-time type is a profile of the ISO 8601 + standard for representation of dates and times using the + Gregorian calendar. The profile is defined by the + date-time production in Section 5.6 of RFC 3339. + + The date-and-time type is compatible with the dateTime XML + schema type with the following notable exceptions: + + (a) The date-and-time type does not allow negative years. + + (b) The date-and-time time-offset -00:00 indicates an unknown + time zone (see RFC 3339) while -00:00 and +00:00 and Z + all represent the same time zone in dateTime. + + (c) The canonical format (see below) of data-and-time values + differs from the canonical format used by the dateTime XML + schema type, which requires all times to be in UTC using + the time-offset 'Z'. + + This type is not equivalent to the DateAndTime textual + convention of the SMIv2 since RFC 3339 uses a different + separator between full-date and full-time and provides + higher resolution of time-secfrac. + + The canonical format for date-and-time values with a known time + zone uses a numeric time zone offset that is calculated using + the device's configured known offset to UTC time. A change of + the device's offset to UTC time will cause date-and-time values + to change accordingly. Such changes might happen periodically + in case a server follows automatically daylight saving time + (DST) time zone offset changes. 
The canonical format for + date-and-time values with an unknown time zone (usually + referring to the notion of local time) uses the time-offset + -00:00."; + reference + "RFC 3339: Date and Time on the Internet: Timestamps + RFC 2579: Textual Conventions for SMIv2 + XSD-TYPES: XML Schema Part 2: Datatypes Second Edition"; + } + + typedef timeticks { + type uint32; + description + "The timeticks type represents a non-negative integer that + represents the time, modulo 2^32 (4294967296 decimal), in + hundredths of a second between two epochs. When a schema + node is defined that uses this type, the description of + the schema node identifies both of the reference epochs. + + In the value set and its semantics, this type is equivalent + to the TimeTicks type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef timestamp { + type yang:timeticks; + description + "The timestamp type represents the value of an associated + timeticks schema node at which a specific occurrence + happened. The specific occurrence must be defined in the + description of any schema node defined using this type. When + the specific occurrence occurred prior to the last time the + associated timeticks attribute was zero, then the timestamp + value is zero. Note that this requires all timestamp values + to be reset to zero when the value of the associated timeticks + attribute reaches 497+ days and wraps around to zero. + + The associated timeticks schema node must be specified + in the description of any schema node using this type. + + In the value set and its semantics, this type is equivalent + to the TimeStamp textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of generic address types ***/ + + typedef phys-address { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "Represents media- or physical-level addresses represented + as a sequence octets, each octet represented by two hexadecimal + numbers. Octets are separated by colons. The canonical + representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the PhysAddress textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + typedef mac-address { + type string { + pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'; + } + description + "The mac-address type represents an IEEE 802 MAC address. + The canonical representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the MacAddress textual convention of the SMIv2."; + reference + "IEEE 802: IEEE Standard for Local and Metropolitan Area + Networks: Overview and Architecture + RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of XML-specific types ***/ + + typedef xpath1.0 { + type string; + description + "This type represents an XPATH 1.0 expression. + + When a schema node is defined that uses this type, the + description of the schema node MUST specify the XPath + context in which the XPath expression is evaluated."; + reference + "XPATH: XML Path Language (XPath) Version 1.0"; + } + + /*** collection of string types ***/ + + typedef hex-string { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + description + "A hexadecimal string with octets represented as hex digits + separated by colons. 
The canonical representation uses + lowercase characters."; + } + + typedef uuid { + type string { + pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'; + } + description + "A Universally Unique IDentifier in the string representation + defined in RFC 4122. The canonical representation uses + lowercase characters. + + The following is an example of a UUID in string representation: + f81d4fae-7dec-11d0-a765-00a0c91e6bf6 + "; + reference + "RFC 4122: A Universally Unique IDentifier (UUID) URN + Namespace"; + } + + typedef dotted-quad { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'; + } + description + "An unsigned 32-bit number expressed in the dotted-quad + notation, i.e., four octets written as decimal numbers + and separated with the '.' (full stop) character."; + } +} diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index 9b6950aeb0f3fda630db87d5d80ad2bf730fac3d..b864d3549e051b54e888c80547724da14fec5f67 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -49,7 +49,10 @@ def json_to_list(json_str : str) -> List[Union[str, Tuple[str, str]]]: if isinstance(data, dict): return [('kv', (key, value)) for key, value in data.items()] elif isinstance(data, list): - return [('item', ', '.join(data))] + if len(data) == 1 and isinstance(data[0], dict): + return [('kv', (key, value)) for key, value in data[0].items()] + else: + return [('item', ', '.join([str(d) for d in data]))] else: return [('item', str(data))] diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py index fb930024d9fc3ff0b70b77345041e33dcb0d28c2..b7fdb78e85dc634627de02947c0861a7f13bdae9 100644 --- a/src/webui/service/device/routes.py +++ b/src/webui/service/device/routes.py @@ -165,6 +165,16 @@ def inventory(device_uuid: str): context_client.close() return render_template('device/inventory.html', device=device_obj) +@device.route('logical/<path:device_uuid>', methods=['GET', 'POST']) +def logical(device_uuid: str): + context_client.connect() + device_obj = get_device(context_client, device_uuid, rw_copy=False) + if device_obj is None: + flash('Device({:s}) not found'.format(str(device_uuid)), 'danger') + device_obj = Device() + context_client.close() + return render_template('device/logical.html', device=device_obj) + @device.get('<path:device_uuid>/delete') def delete(device_uuid): try: diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html index 66e188465994a47f173dcca93237b46cd86adb16..c154346204a4ad59eec54a7e9ae3956a7f3db655 100644 --- a/src/webui/service/templates/base.html +++ b/src/webui/service/templates/base.html @@ -156,7 +156,7 @@ <div class="container"> <div class="row"> <div class="col-md-12"> - <p class="text-center" style="color: white;">© 2022-2023 <a href="https://tfs.etsi.org/">ETSI TeraFlowSDN (TFS) OSG</a></p> + <p class="text-center" style="color: white;">© 2022-2024 <a href="https://tfs.etsi.org/">ETSI OSG/SDG TeraFlowSDN (TFS)</a></p> </div> </div> <div class="row"> diff --git a/src/webui/service/templates/device/home.html b/src/webui/service/templates/device/home.html index fca799acafd965442bc0ee998ae70f9c76a45e32..b1237eac1fde1a16dea30515dd6fcbe353bcdca5 100644 --- a/src/webui/service/templates/device/home.html +++ b/src/webui/service/templates/device/home.html @@ -51,6 +51,7 @@ <th scope="col">Config Rules</th> <th scope="col"></th> <th 
scope="col"></th> + <th scope="col"></th> </tr> </thead> <tbody> @@ -83,6 +84,14 @@ </svg> </a> </td> + <td> + <a href="{{ url_for('device.logical', device_uuid=device.device_id.device_uuid.uuid) }}"> + <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle" viewBox="0 0 16 16"> + <path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14m0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16"/> + <path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0"/> + </svg> + </a> + </td> </tr> {% endfor %} {% else %} diff --git a/src/webui/service/templates/device/logical.html b/src/webui/service/templates/device/logical.html new file mode 100644 index 0000000000000000000000000000000000000000..8a2541989f0ed51c1257aae5fe8d76bfd01ff5c3 --- /dev/null +++ b/src/webui/service/templates/device/logical.html @@ -0,0 +1,397 @@ +<!-- + Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + --> + +{% extends 'base.html' %} + +{% block content %} +<style> + ul, + #myUL { + list-style-type: none; + } + + #myUL { + margin: 0; + padding: 0; + } + + .caret { + cursor: pointer; + -webkit-user-select: none; + /* Safari 3.1+ */ + -moz-user-select: none; + /* Firefox 2+ */ + -ms-user-select: none; + /* IE 10+ */ + user-select: none; + } + + .caret::before { + content: "\25B6"; + color: black; + display: inline-block; + margin-right: 6px; + } + + .caret-down::before { + -ms-transform: rotate(90deg); + /* IE 9 */ + -webkit-transform: rotate(90deg); + /* Safari */ + transform: rotate(90deg); + } + + .nested { + display: none; + } + + .active { + display: block; + } +</style> + +<h1>Device {{ device.name }} ({{ device.device_id.device_uuid.uuid }})</h1> + +<div class="row mb-3"> + <div class="col-sm-3"> + <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('device.home') }}'"> + <i class="bi bi-box-arrow-in-left"></i> + Back to device list + </button> + </div> +</div> +<br> + +<div class="row mb-3"> + <div> + <ul id="myUL"> + <li><span class="caret">ACL</span> + <ul class="nested"> + {% set acl_names = [] %} + {% for config in device.device_config.config_rules %} + {% if config.WhichOneof('config_rule') == 'custom' %} + {% if '/acl/' in config.custom.resource_key %} + {% if 'acl-set' in config.custom.resource_key %} + {% set acl_name = config.custom.resource_key.split('acl-set[')[1].split('][')[0] %} + {% else %} + {% set acl_name = config.custom.resource_key.split('ress[')[1].split('][')[0] %} + {% endif %} + {% if acl_name|length == 0 %} + {% set acl_name = 'Undefined' %} + {% endif %} + {% if acl_name not in acl_names %} + {% set _ = acl_names.append(acl_name) %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% for acl_name in acl_names %} + <li><span class="caret">{{ acl_name }}</span> + <ul class="nested"> + {% for config 
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/acl/' in config.custom.resource_key and acl_name in config.custom.resource_key.split('][')[0] %}
+                            {% if 'acl-entry' in config.custom.resource_key %}
+                            {% set rule_number = config.custom.resource_key.split('acl-entry[')[1].split(']')[0] %}
+                            <li><span><b>Rule {{ rule_number }}:</b> {{ config.custom.resource_value }}</span></li>
+                            {% else %}
+                            <li><span><b>Interface:</b> {{ config.custom.resource_value }}</span></li>
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                        </ul>
+                    </li>
+                    {% endfor %}
+                </ul>
+            </li>
+        </ul>
+
+        <ul class="myUL">
+            <li><span class="caret">Routing Policy</span>
+                <ul class="nested">
+                    {% set pol_names = [] %}
+                    {% for config in device.device_config.config_rules %}
+                    {% if config.WhichOneof('config_rule') == 'custom' %}
+                    {% if '/routing_policy/' in config.custom.resource_key %}
+                    {% if 'policy_definition' in config.custom.resource_key %}
+                    {% set pol_name = config.custom.resource_key.split('policy_definition[')[1].split(']')[0] %}
+                    {% if pol_name|length == 0 %}
+                    {% set pol_name = 'Undefined' %}
+                    {% endif %}
+                    {% if pol_name not in pol_names %}
+                    {% set _ = pol_names.append(pol_name) %}
+                    {% endif %}
+                    {% endif %}
+                    {% endif %}
+                    {% endif %}
+                    {% endfor %}
+                    {% for pol_name in pol_names %}
+                    <li><span class="caret">{{ pol_name }}</span>
+                        <ul class="nested">
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/routing_policy/' in config.custom.resource_key and pol_name in config.custom.resource_key.split('[')[1].split(']')[0] %}
+                            {% if 'policy_definition' not in config.custom.resource_key %}
+                            <li><span>{{ config.custom.resource_value }}</span></li>
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                        </ul>
+                    </li>
+                    {% endfor %}
+                </ul>
+            </li>
+        </ul>
+
+        <ul class="myUL">
+            <li><span class="caret">VRFs</span>
+                <ul class="nested">
+                    <li><span class="caret">VRF default</span>
+                        <ul class="nested">
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/network_instance' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in 'default' %}
+                            {% if ']/' in config.custom.resource_key %}
+                            {% set aux = config.custom.resource_key.split(']/')[1].split('[')[0] %}
+                            <li><span><b>{{ aux.replace('_', ' ').title() }}:</b> {{ config.custom.resource_value }}</span></li>
+                            {% else %}
+                            <li><span><b>Network Instance:</b> {{ config.custom.resource_value }}</span></li>
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">L3VPN</span>
+                        <ul class="nested">
+                            {% set vpn_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/network_instance' in config.custom.resource_key %}
+                            {% if 'L3VRF' in config.custom.resource_value %}
+                            {% set vpn_name = config.custom.resource_key.split('network_instance[')[1].split(']')[0] %}
+                            {% if vpn_name not in vpn_names %}
+                            {% set _ = vpn_names.append(vpn_name) %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                            {% for vpn_name in vpn_names %}
+                            <li><span class="caret">{{ vpn_name }}</span>
+                                <ul class="nested">
+                                    {% for config in device.device_config.config_rules %}
+                                    {% if config.WhichOneof('config_rule') == 'custom' %}
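+                                    {# the VRF name sits in the first bracketed field of the key, e.g. a #}
+                                    {# hypothetical '/network_instance[vpn-1]/protocols[BGP]' yields 'vpn-1' #}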
+                                    {% if '/network_instance' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in vpn_name %}
+                                    {% if ']/' in config.custom.resource_key %}
+                                    {% set aux = config.custom.resource_key.split(']/')[1].split('[')[0] %}
+                                    <li><span><b>{{ aux.replace('_', ' ').title() }}:</b> {{ config.custom.resource_value }}</span></li>
+                                    {% else %}
+                                    <li><span><b>Network Instance:</b> {{ config.custom.resource_value }}</span></li>
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endfor %}
+                                </ul>
+                            </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">L2VPN</span>
+                        <ul class="nested">
+                            {% set vpn_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/network_instance' in config.custom.resource_key %}
+                            {% if 'L2VSI' in config.custom.resource_value %}
+                            {% set vpn_name = config.custom.resource_key.split('network_instance[')[1].split(']')[0] %}
+                            {% if vpn_name not in vpn_names %}
+                            {% set _ = vpn_names.append(vpn_name) %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                            {% for vpn_name in vpn_names %}
+                            <li><span class="caret">{{ vpn_name }}</span>
+                                <ul class="nested">
+                                    {% for config in device.device_config.config_rules %}
+                                    {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/network_instance' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in vpn_name %}
+                                    {% if ']/' in config.custom.resource_key %}
+                                    {% set aux = config.custom.resource_key.split(']/')[1].split('[')[0] %}
+                                    <li><span><b>{{ aux.replace('_', ' ').title() }}:</b> {{ config.custom.resource_value }}</span></li>
+                                    {% else %}
+                                    <li><span><b>Network Instance:</b> {{ config.custom.resource_value }}</span></li>
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endfor %}
+                                </ul>
+                            </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+                </ul>
+            </li>
+        </ul>
+
+        <ul class="myUL">
+            <li><span class="caret">Interfaces</span>
+                <ul class="nested">
+                    <li><span class="caret">Logical Interfaces</span>
+                        <ul class="nested">
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/interface[' in config.custom.resource_key %}
+                            {% if 'ethernetCsmacd' in config.custom.resource_value %}
+                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                            <li><span>{{ interface_name }}:</span> {{ config.custom.resource_value }}</li>
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">Loopback</span>
+                        <ul class="nested">
+                            {% set interface_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/interface[' in config.custom.resource_key %}
+                            {% if 'softwareLoopback' in config.custom.resource_value %}
+                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                            {% if interface_name not in interface_names %}
+                            {% set _ = interface_names.append(interface_name) %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                            {% for interface_name in interface_names %}
+                            <li><span class="caret">{{ interface_name }}</span>
+                                <ul class="nested">
+                                    {% for config in device.device_config.config_rules %}
+                                    {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/interface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                    {% if 'subinterface' in config.custom.resource_key %}
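+                                    {# a hypothetical key ending in 'subinterface[0]' yields index '0' for the row label #}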
+                                    {% set subinterface_name = config.custom.resource_key.split('subinterface[')[1].split(']')[0] %}
+                                    <li><span><b>Subinterface {{ subinterface_name }}:</b> {{ config.custom.resource_value }}</span></li>
+                                    {% else %}
+                                    <li><span>{{ config.custom.resource_value }}</span></li>
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endfor %}
+                                </ul>
+                            </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">Interfaces L3</span>
+                        <ul class="nested">
+                            {% set interface_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/interface[' in config.custom.resource_key %}
+                            {% if 'l3ipvlan' in config.custom.resource_value %}
+                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                            {% if interface_name not in interface_names %}
+                            {% set _ = interface_names.append(interface_name) %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                            {% for interface_name in interface_names %}
+                            <li><span class="caret">{{ interface_name }}</span>
+                                <ul class="nested">
+                                    {% for config in device.device_config.config_rules %}
+                                    {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/interface' in config.custom.resource_key and '/subinterface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                    <li><span>{{ config.custom.resource_value }}</span></li>
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endfor %}
+                                </ul>
+                            </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">Interfaces L2</span>
+                        <ul class="nested">
+                            {% set interface_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/interface[' in config.custom.resource_key %}
+                            {% if 'l2vlan' in config.custom.resource_value or 'mplsTunnel' in config.custom.resource_value %}
+                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                            {% if interface_name not in interface_names %}
+                            {% set _ = interface_names.append(interface_name) %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endif %}
+                            {% endfor %}
+                            {% for interface_name in interface_names %}
+                            <li><span class="caret">{{ interface_name }}</span>
+                                <ul class="nested">
+                                    {% for config in device.device_config.config_rules %}
+                                    {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if 'subinterface' in config.custom.resource_key %}
+                                    {% if '/interface' in config.custom.resource_key and '/subinterface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                    <li><span>{{ config.custom.resource_value }}</span></li>
+                                    {% endif %}
+                                    {% else %}
+                                    {% if '/interface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                    <li><span>{{ config.custom.resource_value }}</span></li>
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endif %}
+                                    {% endfor %}
+                                </ul>
+                            </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+                </ul>
+            </li>
+        </ul>
+
+        <script>
+            var toggler = document.getElementsByClassName("caret");
+            var i;
+            for (i = 0; i < toggler.length; i++) {
+                toggler[i].addEventListener("click", function() {
+                    this.parentElement.querySelector(".nested").classList.toggle("active");
+                    this.classList.toggle("caret-down");
+                });
+            }
+        </script>
+    </div>
+</div>
+
+{% endblock %}
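Note: the device.logical endpoint referenced by the new info-icon column is defined outside this excerpt. Below is a minimal sketch of the kind of Flask route this template expects, modelled on the pattern of the existing webui device views; the blueprint wiring, route path, and dict-style DeviceId construction are illustrative assumptions, not the actual implementation.

    # Sketch only; names below (blueprint object, route path) are assumptions.
    from flask import Blueprint, render_template
    from common.proto.context_pb2 import DeviceId
    from context.client.ContextClient import ContextClient

    device = Blueprint('device', __name__, url_prefix='/device')
    context_client = ContextClient()

    @device.route('logical/<path:device_uuid>', methods=['GET'])
    def logical(device_uuid: str):
        context_client.connect()
        # GetDevice returns the protobuf Device whose device_config.config_rules
        # the template walks, keeping only rules where
        # WhichOneof('config_rule') == 'custom' (plain key/value pairs).
        device_obj = context_client.GetDevice(DeviceId(device_uuid={'uuid': device_uuid}))
        context_client.close()
        return render_template('device/logical.html', device=device_obj)

The {% set _ = names.append(...) %} idiom used throughout the template is the usual Jinja2 workaround for accumulating values inside a loop: plain {% set %} assignments are scoped to each iteration, while in-place mutation of a list created outside the loop persists.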