diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5f826eadbd50ea3f212f5f84d00946ee6feae4fb..ad0daad286c58e9d5da8480f9fd41ed66ab6efc8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -48,6 +48,7 @@ include: - local: '/src/kpi_manager/.gitlab-ci.yml' - local: '/src/kpi_value_api/.gitlab-ci.yml' - local: '/src/kpi_value_writer/.gitlab-ci.yml' + - local: '/src/telemetry/.gitlab-ci.yml' - local: '/src/qos_profile/.gitlab-ci.yml' # This should be last one: end-to-end integration tests diff --git a/deploy/all.sh b/deploy/all.sh index f93cd92ac5e3189b0dc8fa71d74a586e929aaecc..e9b33b469b7cad1547ab0dcb63e326672f51971e 100755 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -215,6 +215,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} # Deploy QuestDB ./deploy/qdb.sh +# Deploy Apache Kafka +./deploy/kafka.sh + # Expose Dashboard ./deploy/expose_dashboard.sh diff --git a/deploy/kafka.sh b/deploy/kafka.sh index 4a91bfc9e657d1b8a6a548b9c0a81a2f8a0b45e0..0483bce153b457800c6f7db2ef66685e90118111 100755 --- a/deploy/kafka.sh +++ b/deploy/kafka.sh @@ -20,50 +20,71 @@ # If not already set, set the namespace where Apache Kafka will be deployed. export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} +# If not already set, set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"} + +# If not already set, if flag is YES, Apache Kafka will be redeployed and all topics will be lost. +export KFK_REDEPLOY=${KFK_REDEPLOY:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## -# Constants -TMP_FOLDER="./tmp" -KFK_MANIFESTS_PATH="manifests/kafka" -KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml" -KFK_MANIFEST="02-kafka.yaml" + # Constants + TMP_FOLDER="./tmp" + KFK_MANIFESTS_PATH="manifests/kafka" + KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml" + KFK_MANIFEST="02-kafka.yaml" + + # Create a tmp folder for files modified during the deployment + TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests" + mkdir -p ${TMP_MANIFESTS_FOLDER} -# Create a tmp folder for files modified during the deployment -TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests" -mkdir -p ${TMP_MANIFESTS_FOLDER} +function kafka_deploy() { + # copy zookeeper and kafka manifest files to temporary manifest location + cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" + cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}" -# copy zookeeper and kafka manifest files to temporary manifest location -cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" -cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}" + # echo "Apache Kafka Namespace" + echo ">>> Delete Apache Kafka Namespace" + kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found -echo "Apache Kafka Namespace" -echo ">>> Delete Apache Kafka Namespace" -kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found + echo ">>> Create Apache Kafka Namespace" + kubectl create namespace ${KFK_NAMESPACE} -echo ">>> Create Apache Kafka Namespace" -kubectl create namespace ${KFK_NAMESPACE} + # echo ">>> Deplying Apache Kafka Zookeeper" + # Kafka zookeeper service should be deployed before the kafka service + kubectl --namespace ${KFK_NAMESPACE} apply -f 
"${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" -echo ">>> Deplying Apache Kafka Zookeeper" -# Kafka zookeeper service should be deployed before the kafka service -kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" + KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically + KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') -KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically -KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') + # Kafka service should be deployed after the zookeeper service + sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" -# Kafka service should be deployed after the zookeeper service -sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + # echo ">>> Deploying Apache Kafka Broker" + kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" -echo ">>> Deploying Apache Kafka Broker" -kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + # echo ">>> Verifing Apache Kafka deployment" + sleep 5 + # KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods) + # if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then + # echo "Deployment Error: \n $KFK_PODS_STATUS" + # else + # echo "$KFK_PODS_STATUS" + # fi +} -echo ">>> Verifing Apache Kafka deployment" -sleep 10 -KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods) -if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then - echo "Deployment Error: \n $KFK_PODS_STATUS" +echo "Apache Kafka" +echo ">>> Checking if Apache Kafka is deployed ... " +if [ "$KFK_REDEPLOY" == "YES" ]; then + echo ">>> Redeploying kafka namespace" + kafka_deploy +elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then + echo ">>> Apache Kafka already present; skipping step." else - echo "$KFK_PODS_STATUS" -fi \ No newline at end of file + echo ">>> Kafka namespace doesn't exists. Deploying kafka namespace" + kafka_deploy +fi +echo diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 62f36a2c138c99b1ee666c8c5397083266ad699d..e7201441815c7cc08c46cce3714f33f43401c2eb 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -115,6 +115,17 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"} export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} +# ----- Apache Kafka ------------------------------------------------------ + +# If not already set, set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} + +# If not already set, set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"} + +# If not already set, if flag is YES, Apache Kafka will be redeployed and topic will be lost. 
+export KFK_REDEPLOY=${KFK_REDEPLOY:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -147,7 +158,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type= --from-literal=CRDB_SSLMODE=require printf "\n" -echo "Create secret with CockroachDB data for KPI Management" +echo "Create secret with CockroachDB data for KPI Management microservices" CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt" # TODO: change by specific configurable environment variable kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ @@ -159,6 +170,25 @@ kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --t --from-literal=CRDB_SSLMODE=require printf "\n" +echo "Create secret with CockroachDB data for Telemetry microservices" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +CRDB_DATABASE_TELEMETRY="tfs_telemetry" # TODO: change by specific configurable environment variable +kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with Apache Kafka data for KPI and Telemetry microservices" +KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}') +kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \ + --from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT} +printf "\n" + echo "Create secret with NATS data" NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') if [ -z "$NATS_CLIENT_PORT" ]; then @@ -234,15 +264,17 @@ for COMPONENT in $TFS_COMPONENTS; do if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" - elif [ "$COMPONENT" == "pathcomp" ]; then + elif [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ]; then BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" - # next command is redundant, but helpful to keep cache updated between rebuilds - IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" - $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . 
>> "$BUILD_LOG" + if [ "$COMPONENT" == "pathcomp" ]; then + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + fi elif [ "$COMPONENT" == "dlt" ]; then BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" $DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" @@ -255,7 +287,7 @@ for COMPONENT in $TFS_COMPONENTS; do echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." - if [ "$COMPONENT" == "pathcomp" ]; then + if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ]; then IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" @@ -306,7 +338,7 @@ for COMPONENT in $TFS_COMPONENTS; do cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" fi - if [ "$COMPONENT" == "pathcomp" ]; then + if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ]; then IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" diff --git a/manifests/kpi_value_apiservice.yaml b/manifests/kpi_value_apiservice.yaml index 74eb90f675794f1b451b04af55e191edff58fae5..e4dcb00545ffaa33de39fd29c029780b777ea91f 100644 --- a/manifests/kpi_value_apiservice.yaml +++ b/manifests/kpi_value_apiservice.yaml @@ -39,6 +39,9 @@ spec: env: - name: LOG_LEVEL value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:30020"] diff --git a/manifests/kpi_value_writerservice.yaml b/manifests/kpi_value_writerservice.yaml index 8a8e44ec2a571f1290e30a08d1c896a6339cbe46..e21e36f48ba08999f142e8548fed61cd2dfef0cc 100644 --- a/manifests/kpi_value_writerservice.yaml +++ b/manifests/kpi_value_writerservice.yaml @@ -39,6 +39,9 @@ spec: env: - name: LOG_LEVEL value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:30030"] diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f9917499a425b95d436ffa8cdb311d29483d2ca --- /dev/null +++ b/manifests/telemetryservice.yaml @@ -0,0 +1,128 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: telemetryservice +spec: + selector: + matchLabels: + app: telemetryservice + #replicas: 1 + template: + metadata: + labels: + app: telemetryservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: frontend + image: labs.etsi.org:5050/tfs/controller/telemetry-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 30050 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: crdb-telemetry + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30050"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30050"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: backend + image: labs.etsi.org:5050/tfs/controller/telemetry-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 30060 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: kfk-kpi-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30060"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:30060"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: telemetryservice + labels: + app: telemetryservice +spec: + type: ClusterIP + selector: + app: telemetryservice + ports: + - name: frontend-grpc + protocol: TCP + port: 30050 + targetPort: 30050 + - name: backend-grpc + protocol: TCP + port: 30060 + targetPort: 30060 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: telemetryservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: telemetryservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 diff --git a/my_deploy.sh b/my_deploy.sh index b89df7481ebd17edf2b966eb818598d1a04a596f..45e0d1301b2807ae7393d45366f849c69285b909 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -181,3 +181,8 @@ export GRAF_EXT_PORT_HTTP="3000" # Set the namespace where Apache Kafka will be deployed. export KFK_NAMESPACE="kafka" +# Set the port Apache Kafka server will be exposed to. 
+export KFK_SERVER_PORT="9092" + +# Set the flag to YES for redeploying of Apache Kafka +export KFK_REDEPLOY="" diff --git a/proto/telemetry_frontend.proto b/proto/telemetry_frontend.proto index dbc1e8bf688f9f2df341484c1929e2338c458bbf..614d10cf06cdbb1ff4fba6e51a39286eb5132688 100644 --- a/proto/telemetry_frontend.proto +++ b/proto/telemetry_frontend.proto @@ -19,9 +19,9 @@ import "context.proto"; import "kpi_manager.proto"; service TelemetryFrontendService { - rpc StartCollector (Collector ) returns (CollectorId ) {} - rpc StopCollector (CollectorId ) returns (context.Empty) {} - rpc SelectCollectors(CollectorFilter) returns (CollectorList) {} + rpc StartCollector (Collector ) returns (CollectorId ) {} + rpc StopCollector (CollectorId ) returns (context.Empty) {} + rpc SelectCollectors (CollectorFilter) returns (CollectorList) {} } message CollectorId { @@ -29,10 +29,12 @@ message CollectorId { } message Collector { - CollectorId collector_id = 1; // The Collector ID - kpi_manager.KpiId kpi_id = 2; // The KPI Id to be associated to the collected samples - float duration_s = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely - float interval_s = 4; // Interval between collected samples + CollectorId collector_id = 1; // The Collector ID + kpi_manager.KpiId kpi_id = 2; // The KPI Id to be associated to the collected samples + float duration_s = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely + float interval_s = 4; // Interval between collected samples + context.Timestamp start_time = 5; // Timestamp when Collector start execution + context.Timestamp end_time = 6; // Timestamp when Collector stop execution } message CollectorFilter { diff --git a/scripts/run_tests_locally-kpi-manager.sh b/scripts/run_tests_locally-kpi-manager.sh index a6a24f90db93d56300ac997bd00675c479ef13ae..8a4ce8d95c74657451147078a1d93e891dfc2ac8 100755 --- a/scripts/run_tests_locally-kpi-manager.sh +++ b/scripts/run_tests_locally-kpi-manager.sh @@ -24,7 +24,7 @@ cd $PROJECTDIR/src # python3 kpi_manager/tests/test_unitary.py RCFILE=$PROJECTDIR/coverage/.coveragerc -CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}') +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ kpi_manager/tests/test_kpi_manager.py diff --git a/scripts/run_tests_locally-kpi-value-API.sh b/scripts/run_tests_locally-kpi-value-API.sh index 8dfbfb16237634519dcae2fcc34f850a5188c1e7..3953d2a89c6fbe2bd3546e648246b9b018e5fdb0 100755 --- a/scripts/run_tests_locally-kpi-value-API.sh +++ b/scripts/run_tests_locally-kpi-value-API.sh @@ -19,7 +19,8 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc - +KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") +KFK_SERVER_ADDRESS=${KAFKA_IP}:9092 # helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG -o log_cli=true --verbose \ kpi_value_api/tests/test_kpi_value_api.py diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh index bb1c48b76440c00b398875a8f704c2a82ba4ab50..4b9a417603cc42a4e7e8b19c7394cc38633817fa 100755 --- 
a/scripts/run_tests_locally-telemetry-DB.sh +++ b/scripts/run_tests_locally-telemetry-DB.sh @@ -22,5 +22,5 @@ cd $PROJECTDIR/src # kpi_manager/tests/test_unitary.py RCFILE=$PROJECTDIR/coverage/.coveragerc -python3 -m pytest --log-cli-level=INFO --verbose \ - telemetry/database/tests/telemetryDBtests.py +python3 -m pytest --log-level=DEBUG --log-cli-level=debug --verbose \ + telemetry/tests/test_telemetryDB.py diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh index 7652ccb583268285dcd2fcf3090b717dc18e4fc3..a2a1de52340cac527d4d1c446c76740d38ce7783 100755 --- a/scripts/run_tests_locally-telemetry-frontend.sh +++ b/scripts/run_tests_locally-telemetry-frontend.sh @@ -24,5 +24,5 @@ cd $PROJECTDIR/src # python3 kpi_manager/tests/test_unitary.py RCFILE=$PROJECTDIR/coverage/.coveragerc -python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \ +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ telemetry/frontend/tests/test_frontend.py diff --git a/src/common/Constants.py b/src/common/Constants.py index 8b0fa80c158a6782fc529d598b179ddd5c9e3926..1bfde1dd9e62ec39969bbd83135fe77c33e03e19 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -65,6 +65,7 @@ class ServiceNameEnum(Enum): KPIVALUEAPI = 'kpi-value-api' KPIVALUEWRITER = 'kpi-value-writer' TELEMETRYFRONTEND = 'telemetry-frontend' + TELEMETRYBACKEND = 'telemetry-backend' QOSPROFILE = 'qos-profile' # Used for test and debugging only @@ -95,11 +96,12 @@ DEFAULT_SERVICE_GRPC_PORTS = { ServiceNameEnum.E2EORCHESTRATOR .value : 10050, ServiceNameEnum.OPTICALCONTROLLER .value : 10060, ServiceNameEnum.BGPLS .value : 20030, + ServiceNameEnum.QOSPROFILE .value : 20040, ServiceNameEnum.KPIMANAGER .value : 30010, ServiceNameEnum.KPIVALUEAPI .value : 30020, ServiceNameEnum.KPIVALUEWRITER .value : 30030, ServiceNameEnum.TELEMETRYFRONTEND .value : 30050, - ServiceNameEnum.QOSPROFILE .value : 30060, + ServiceNameEnum.TELEMETRYBACKEND .value : 30060, # Used for test and debugging only ServiceNameEnum.DLT_GATEWAY .value : 50051, diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py index 24ae2cff7b5e710e18999eb09029216a4a5d6c8a..5ada88a1ea0a7eae31eda741d81757fa624521de 100644 --- a/src/common/tools/kafka/Variables.py +++ b/src/common/tools/kafka/Variables.py @@ -16,14 +16,29 @@ import logging from enum import Enum from confluent_kafka import KafkaException from confluent_kafka.admin import AdminClient, NewTopic +from common.Settings import get_setting LOGGER = logging.getLogger(__name__) +KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}' class KafkaConfig(Enum): - # SERVER_IP = "127.0.0.1:9092" - SERVER_IP = "kafka-service.kafka.svc.cluster.local:9092" - ADMIN_CLIENT = AdminClient({'bootstrap.servers': SERVER_IP}) + + @staticmethod + def get_kafka_address() -> str: + kafka_server_address = get_setting('KFK_SERVER_ADDRESS', default=None) + if kafka_server_address is None: + KFK_NAMESPACE = get_setting('KFK_NAMESPACE') + KFK_PORT = get_setting('KFK_SERVER_PORT') + kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT) + return kafka_server_address + + @staticmethod + def get_admin_client(): + SERVER_ADDRESS = KafkaConfig.get_kafka_address() + ADMIN_CLIENT = AdminClient({'bootstrap.servers': SERVER_ADDRESS }) + return ADMIN_CLIENT + class KafkaTopic(Enum): REQUEST = 'topic_request' @@ -38,8 +53,9 @@ class KafkaTopic(Enum): Method to create Kafka topics defined as class members 
""" all_topics = [member.value for member in KafkaTopic] + LOGGER.debug("Kafka server address is: {:} ".format(KafkaConfig.get_kafka_address())) if( KafkaTopic.create_new_topic_if_not_exists( all_topics )): - LOGGER.debug("All topics are created sucsessfully") + LOGGER.debug("All topics are created sucsessfully or Already Exists") return True else: LOGGER.debug("Error creating all topics") @@ -55,14 +71,14 @@ class KafkaTopic(Enum): LOGGER.debug("Topics names to be verified and created: {:}".format(new_topics)) for topic in new_topics: try: - topic_metadata = KafkaConfig.ADMIN_CLIENT.value.list_topics(timeout=5) + topic_metadata = KafkaConfig.get_admin_client().list_topics(timeout=5) # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics)) if topic not in topic_metadata.topics: # If the topic does not exist, create a new topic print("Topic {:} does not exist. Creating...".format(topic)) LOGGER.debug("Topic {:} does not exist. Creating...".format(topic)) new_topic = NewTopic(topic, num_partitions=1, replication_factor=1) - KafkaConfig.ADMIN_CLIENT.value.create_topics([new_topic]) + KafkaConfig.get_admin_client().create_topics([new_topic]) else: print("Topic name already exists: {:}".format(topic)) LOGGER.debug("Topic name already exists: {:}".format(topic)) diff --git a/src/kpi_manager/README.md b/src/kpi_manager/README.md index c1feadcc4843db26a219d1e3b37833ddd80b18dc..6e9b56d9349aa6acd5c41004e32c933619a37f65 100644 --- a/src/kpi_manager/README.md +++ b/src/kpi_manager/README.md @@ -1,29 +1,24 @@ # How to locally run and test KPI manager micro-service -## --- File links need to be updated. --- ### Pre-requisets -The following requirements should be fulfilled before the execuation of KPI management service. +Ensure the following requirements are met before executing the KPI management service: -1. Verify that [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and grpcs file are generated sucessfully. -2. Virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) are installed sucessfully. -3. Verify the creation of required database and table. -[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file enlist the functions to create tables and database and -[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment. +1. A virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/requirements.in) sucessfully installed. +2. Verify the creation of required database and table. The +[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_db.py) python file lists the functions to create tables and the database. The +[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/database/KpiEngine.py) file contains the DB string. 
### Messages format templates -["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file enlist the basic gRPC messages format used during the testing. +The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_messages.py) python file contains templates for creating gRPC messages. -### Test file -["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file enlist different tests conducted during the experiment. +### Unit test file +The ["KPI manager test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_manager.py) python file lists various tests conducted to validate functionality. ### Flow of execution (Kpi Maanager Service functions) 1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table. -2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types. +2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor to the `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types. -3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB. +3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from the DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from the DB. -4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types. - -## For KPI composer and KPI writer -The functionalities of KPI composer and writer is heavily dependent upon Telemetery service. Therfore, these services has other pre-requsites that are mention [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in). +4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types. diff --git a/src/kpi_manager/database/Kpi_DB.py b/src/kpi_manager/database/Kpi_DB.py index 4b60640707c8d0c2ce90e5ab135ddf6fd4c91f63..49ad9c9b579daa918818366a1d9505089968edc2 100644 --- a/src/kpi_manager/database/Kpi_DB.py +++ b/src/kpi_manager/database/Kpi_DB.py @@ -34,14 +34,15 @@ class KpiDB: def create_database(self) -> None: if not sqlalchemy_utils.database_exists(self.db_engine.url): - LOGGER.debug("Database created. {:}".format(self.db_engine.url)) sqlalchemy_utils.create_database(self.db_engine.url) + LOGGER.debug("Database created. {:}".format(self.db_engine.url)) def drop_database(self) -> None: if sqlalchemy_utils.database_exists(self.db_engine.url): sqlalchemy_utils.drop_database(self.db_engine.url) def create_tables(self): + # TODO: use "get_tables(declatrative class obj)" method of "sqlalchemy_utils" to verify tables. 
try: KpiModel.metadata.create_all(self.db_engine) # type: ignore LOGGER.debug("Tables created in the DB Name: {:}".format(self.db_name)) @@ -69,8 +70,7 @@ class KpiDB: session.rollback() if "psycopg2.errors.UniqueViolation" in str(e): LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}") - raise AlreadyExistsException(row.__class__.__name__, row, - extra_details=["Unique key voilation: {:}".format(e)] ) + raise AlreadyExistsException(row.__class__.__name__, row, extra_details=["Unique key voilation: {:}".format(e)] ) else: LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) @@ -89,7 +89,6 @@ class KpiDB: print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search)) return None except Exception as e: - session.rollback() LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}") raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)]) finally: diff --git a/src/kpi_manager/tests/test_kpi_db.py b/src/kpi_manager/tests/test_kpi_db.py index e961c12bacdbac07f111b229435ed3d89d62581f..d4a57f83664f851504389b3bbe99d5c2a92542d9 100644 --- a/src/kpi_manager/tests/test_kpi_db.py +++ b/src/kpi_manager/tests/test_kpi_db.py @@ -21,8 +21,8 @@ LOGGER = logging.getLogger(__name__) def test_verify_databases_and_Tables(): LOGGER.info('>>> test_verify_Tables : START <<< ') kpiDBobj = KpiDB() - kpiDBobj.drop_database() - kpiDBobj.verify_tables() + # kpiDBobj.drop_database() + # kpiDBobj.verify_tables() kpiDBobj.create_database() kpiDBobj.create_tables() kpiDBobj.verify_tables() diff --git a/src/kpi_manager/tests/test_kpi_manager.py b/src/kpi_manager/tests/test_kpi_manager.py index f0d9526d33694a683b70180eb3bc6de833bf1cfa..219fdadee9e2f4ca9ea9ac0be040043d4edfbdbe 100755 --- a/src/kpi_manager/tests/test_kpi_manager.py +++ b/src/kpi_manager/tests/test_kpi_manager.py @@ -17,7 +17,7 @@ import os, pytest import logging from typing import Union -#from common.proto.context_pb2 import Empty +from common.proto.context_pb2 import Empty from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc) @@ -26,12 +26,6 @@ from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList from common.tools.service.GenericGrpcService import GenericGrpcService -#from context.client.ContextClient import ContextClient - -# from device.service.driver_api.DriverFactory import DriverFactory -# from device.service.driver_api.DriverInstanceCache import DriverInstanceCache -# from device.service.DeviceService import DeviceService -# from device.client.DeviceClient import DeviceClient from kpi_manager.tests.test_messages import create_kpi_descriptor_request, create_kpi_filter_request, create_kpi_descriptor_request_a from kpi_manager.service.KpiManagerService import KpiManagerService @@ -39,12 +33,6 @@ from kpi_manager.client.KpiManagerClient import KpiManagerClient from kpi_manager.tests.test_messages import create_kpi_descriptor_request from kpi_manager.tests.test_messages import create_kpi_id_request - -#from monitoring.service.NameMapping import NameMapping - -#os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' -#from device.service.drivers import DRIVERS - 
########################### # Tests Setup ########################### @@ -55,8 +43,6 @@ KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) # t os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT) -# METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME'){} - LOGGER = logging.getLogger(__name__) class MockContextService(GenericGrpcService): @@ -70,84 +56,10 @@ class MockContextService(GenericGrpcService): self.context_servicer = MockServicerImpl_Context() add_ContextServiceServicer_to_server(self.context_servicer, self.server) -# @pytest.fixture(scope='session') -# def context_service(): -# LOGGER.info('Initializing MockContextService...') -# _service = MockContextService(MOCKSERVICE_PORT) -# _service.start() - -# LOGGER.info('Yielding MockContextService...') -# yield _service - -# LOGGER.info('Terminating MockContextService...') -# _service.context_servicer.msg_broker.terminate() -# _service.stop() - -# LOGGER.info('Terminated MockContextService...') - -# @pytest.fixture(scope='session') -# def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing ContextClient...') -# _client = ContextClient() - -# LOGGER.info('Yielding ContextClient...') -# yield _client - -# LOGGER.info('Closing ContextClient...') -# _client.close() - -# LOGGER.info('Closed ContextClient...') - -# @pytest.fixture(scope='session') -# def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing DeviceService...') -# driver_factory = DriverFactory(DRIVERS) -# driver_instance_cache = DriverInstanceCache(driver_factory) -# _service = DeviceService(driver_instance_cache) -# _service.start() - -# # yield the server, when test finishes, execution will resume to stop it -# LOGGER.info('Yielding DeviceService...') -# yield _service - -# LOGGER.info('Terminating DeviceService...') -# _service.stop() - -# LOGGER.info('Terminated DeviceService...') - -# @pytest.fixture(scope='session') -# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing DeviceClient...') -# _client = DeviceClient() - -# LOGGER.info('Yielding DeviceClient...') -# yield _client - -# LOGGER.info('Closing DeviceClient...') -# _client.close() - -# LOGGER.info('Closed DeviceClient...') - -# @pytest.fixture(scope='session') -# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument -# LOGGER.info('Initializing DeviceClient...') -# _client = DeviceClient() - -# LOGGER.info('Yielding DeviceClient...') -# yield _client - -# LOGGER.info('Closing DeviceClient...') -# _client.close() - -# LOGGER.info('Closed DeviceClient...') - # This fixture will be requested by test cases and last during testing session @pytest.fixture(scope='session') def kpi_manager_service(): LOGGER.info('Initializing KpiManagerService...') - #name_mapping = NameMapping() - # _service = MonitoringService(name_mapping) - # _service = KpiManagerService(name_mapping) _service = KpiManagerService() _service.start() @@ -181,35 +93,28 @@ def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disab # Prepare Environment, should be the first test ################################################## -# # ERROR 
on this test --- -# def test_prepare_environment( -# context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument -# ): -# context_id = json_context_id(DEFAULT_CONTEXT_NAME) -# context_client.SetContext(Context(**json_context(DEFAULT_CONTEXT_NAME))) -# context_client.SetTopology(Topology(**json_topology(DEFAULT_TOPOLOGY_NAME, context_id=context_id))) ########################### # Tests Implementation of Kpi Manager ########################### # ---------- 3rd Iteration Tests ---------------- -# def test_SetKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiId) +def test_SetKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") + response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) -# def test_DeleteKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ") -# # adding KPI -# response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# # deleting KPI -# del_response = kpi_manager_client.DeleteKpiDescriptor(response_id) -# # select KPI -# kpi_manager_client.GetKpiDescriptor(response_id) -# LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response)) -# assert isinstance(del_response, Empty) +def test_DeleteKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ") + # adding KPI + response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # deleting KPI + del_response = kpi_manager_client.DeleteKpiDescriptor(response_id) + # select KPI + kpi_manager_client.GetKpiDescriptor(response_id) + LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response)) + assert isinstance(del_response, Empty) def test_GetKpiDescriptor(kpi_manager_client): LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") @@ -225,77 +130,18 @@ def test_GetKpiDescriptor(kpi_manager_client): assert isinstance(response, KpiDescriptor) -# def test_SelectKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ") -# # adding KPI -# kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# # select KPI(s) -# response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiDescriptorList) - -# def test_set_list_of_KPIs(kpi_manager_client): -# LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ") -# KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"] -# # adding KPI -# for kpi in KPIs_TO_SEARCH: -# kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi)) - - -# ---------- 2nd Iteration Tests ----------------- -# def test_SetKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") -# with open("kpi_manager/tests/KPI_configs.json", 'r') as file: -# data = json.load(file) -# _descriptors = data.get('KPIs', []) -# for _descritor_name in _descriptors: -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(_descritor_name)) -# LOGGER.info("Response gRPC message object: 
{:}".format(response)) -# assert isinstance(response, KpiId) - -# def test_GetKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") -# response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiDescriptor) - -# def test_DeleteKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ") -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# del_response = kpi_manager_client.DeleteKpiDescriptor(response) -# kpi_manager_client.GetKpiDescriptor(response) -# LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response)) -# assert isinstance(del_response, Empty) - -# def test_SelectKpiDescriptor(kpi_manager_client): -# LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ") -# kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a()) -# response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request_a()) -# LOGGER.info("Response gRPC message object: {:}".format(response)) -# assert isinstance(response, KpiDescriptorList) - -# ------------- INITIAL TESTs ---------------- -# Test case that makes use of client fixture to test server's CreateKpi method -# def test_set_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name -# # make call to server -# LOGGER.warning('test_create_kpi requesting') -# for i in range(3): -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_request(str(i+1))) -# LOGGER.debug(str(response)) -# assert isinstance(response, KpiId) - -# # Test case that makes use of client fixture to test server's DeleteKpi method -# def test_delete_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name -# # make call to server -# LOGGER.warning('delete_kpi requesting') -# response = kpi_manager_client.SetKpiDescriptor(create_kpi_request('4')) -# response = kpi_manager_client.DeleteKpiDescriptor(response) -# LOGGER.debug(str(response)) -# assert isinstance(response, Empty) +def test_SelectKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ") + # adding KPI + kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # select KPI(s) + response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request()) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiDescriptorList) -# # Test case that makes use of client fixture to test server's GetKpiDescriptor method -# def test_select_kpi_descriptor(kpi_manager_client): # pylint: disable=redefined-outer-name -# LOGGER.warning('test_selectkpidescritor begin') -# response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request()) -# LOGGER.debug(str(response)) -# assert isinstance(response, KpiDescriptorList) +def test_set_list_of_KPIs(kpi_manager_client): + LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ") + KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"] + # adding KPI + for kpi in KPIs_TO_SEARCH: + kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi)) diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml index 166e9d3cbcf3eb09c914384a9906853dddd7bfb5..1a6f821ba9e798bb4220d914109ab3a65f0f1792 100644 --- a/src/kpi_value_api/.gitlab-ci.yml +++ b/src/kpi_value_api/.gitlab-ci.yml @@ -50,10 +50,30 @@ unit_test kpi-value-api: - docker login -u 
"$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi - docker container prune -f script: - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - - docker run --name $IMAGE_NAME -d -p 30020:30020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME -d -p 30020:30020 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - sleep 5 - docker ps -a - docker logs $IMAGE_NAME @@ -74,7 +94,7 @@ unit_test kpi-value-api: - src/$IMAGE_NAME/**/*.{py,in,yml} - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - - src/$IMAGE_NAME/tests/Dockerfile + # - src/$IMAGE_NAME/tests/Dockerfile # mayne not needed - manifests/${IMAGE_NAME}service.yaml - .gitlab-ci.yml artifacts: diff --git a/src/kpi_value_api/Dockerfile b/src/kpi_value_api/Dockerfile index 7dd8d307b8338c4a29e97c742ca12a49c4611e0a..25b8da931f88000dd229c536456a3eb1fa7f56db 100644 --- a/src/kpi_value_api/Dockerfile +++ b/src/kpi_value_api/Dockerfile @@ -63,6 +63,8 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory WORKDIR /var/teraflow COPY src/kpi_value_api/. kpi_value_api/ +COPY src/kpi_manager/__init__.py kpi_manager/__init__.py +COPY src/kpi_manager/client/. kpi_manager/client/ # Start the service ENTRYPOINT ["python", "-m", "kpi_value_api.service"] diff --git a/src/kpi_value_api/README.md b/src/kpi_value_api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..70ba2c5e79c79147e336307ecc6d5ddfc263df90 --- /dev/null +++ b/src/kpi_value_api/README.md @@ -0,0 +1,23 @@ +# How to locally run and test KPI Value API micro-service + +### Pre-requisets +Ensure the following requirements are met before executing the KPI Value API service. + +1. The KPI Manger service is running and Apache Kafka is running. + +2. A virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/requirements.in) file sucessfully installed. + +3. Call the ["create_all_topics()"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/common/tools/kafka/Variables.py) function to verify the existence of all required topics on kafka. 
+ +### Messages format templates +The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/messages.py) python file contains templates for creating gRPC messages. + +### Unit test file +The ["KPI Value API test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/test_kpi_value_api.py) python file enlist various tests conducted to validate functionality. + +### Flow of execution (Kpi Maanager Service functions) +1. Call the `create_new_topic_if_not_exists(<list of string>)` method to create any new topics if needed. + +2. Call `StoreKpiValues(KpiValueList)` to produce `Kpi Value` on a Kafka Topic. (The `KpiValueWriter` microservice will consume and process the `Kpi Value`) + +3. Call `SelectKpiValues(KpiValueFilter) -> KpiValueList` to read metric from the Prometheus DB. diff --git a/src/kpi_value_api/requirements.in b/src/kpi_value_api/requirements.in index 7e4694109dc4e1d31b86abfc03162494faafcdaf..f5695906a8d02d55e15960a76986b8d03f02dba1 100644 --- a/src/kpi_value_api/requirements.in +++ b/src/kpi_value_api/requirements.in @@ -14,3 +14,4 @@ confluent-kafka==2.3.* requests==2.27.* +prometheus-api-client==0.5.3 \ No newline at end of file diff --git a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py index d27de54f3cddfd0d70d656a89c45adc50e518289..4ea978fafc8d7454d41f64182d553d030215113a 100644 --- a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py +++ b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py @@ -12,98 +12,141 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, grpc, requests -from typing import Tuple, Any -from datetime import datetime +import logging, grpc, json +from typing import Dict from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.tools.kafka.Variables import KafkaConfig, KafkaTopic from common.proto.context_pb2 import Empty +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceServicer from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter, KpiValue, KpiValueType from confluent_kafka import Producer as KafkaProducer +from prometheus_api_client import PrometheusConnect +from prometheus_api_client.utils import parse_datetime + +from kpi_manager.client.KpiManagerClient import KpiManagerClient LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('KpiValueAPI', 'NBIgRPC') -PROM_URL = "http://localhost:9090" +PROM_URL = "http://prometheus-k8s.monitoring.svc.cluster.local:9090" # TODO: updated with the env variables class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer): def __init__(self): LOGGER.debug('Init KpiValueApiService') - + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def StoreKpiValues(self, request: KpiValueList, grpc_context: grpc.ServicerContext ) -> Empty: LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request)) - producer_obj = KafkaProducer({ - 'bootstrap.servers' : KafkaConfig.SERVER_IP.value - }) + + producer = self.kafka_producer for kpi_value in request.kpi_value_list: - kpi_value_to_produce : Tuple [str, Any, Any] = ( - kpi_value.kpi_id.kpi_id, - kpi_value.timestamp, - 
kpi_value.kpi_value_type # kpi_value.kpi_value_type.(many options) how? - ) + kpi_value_to_produce : Dict = { + "kpi_uuid" : kpi_value.kpi_id.kpi_id.uuid, + "timestamp" : kpi_value.timestamp.timestamp, + "kpi_value_type" : self.ExtractKpiValueByType(kpi_value.kpi_value_type) + } LOGGER.debug('KPI to produce is {:}'.format(kpi_value_to_produce)) msg_key = "gRPC-kpivalueapi" # str(__class__.__name__) can be used - producer_obj.produce( + producer.produce( KafkaTopic.VALUE.value, key = msg_key, - value = kpi_value.SerializeToString(), # value = json.dumps(kpi_value_to_produce), + value = json.dumps(kpi_value_to_produce), callback = self.delivery_callback ) - producer_obj.flush() + producer.flush() return Empty() + def ExtractKpiValueByType(self, value): + attributes = [ 'floatVal' , 'int32Val' , 'uint32Val','int64Val', + 'uint64Val', 'stringVal', 'boolVal'] + for attr in attributes: + try: + return getattr(value, attr) + except (ValueError, TypeError, AttributeError): + continue + return None + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SelectKpiValues(self, request: KpiValueFilter, grpc_context: grpc.ServicerContext ) -> KpiValueList: LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request)) response = KpiValueList() - metrics = [kpi.kpi_id for kpi in request.kpi_id] - start_timestamps = [timestamp for timestamp in request.start_timestamp] - end_timestamps = [timestamp for timestamp in request.end_timestamp] - results = [] + + kpi_manager_client = KpiManagerClient() + prom_connect = PrometheusConnect(url=PROM_URL) - for start, end in zip(start_timestamps, end_timestamps): - start_str = datetime.fromtimestamp(start.seconds).isoformat() + "Z" - end_str = datetime.fromtimestamp(end.seconds).isoformat() + "Z" + metrics = [self.GetKpiSampleType(kpi, kpi_manager_client) for kpi in request.kpi_id] + start_timestamps = [parse_datetime(timestamp) for timestamp in request.start_timestamp] + end_timestamps = [parse_datetime(timestamp) for timestamp in request.end_timestamp] + prom_response = [] + for start_time, end_time in zip(start_timestamps, end_timestamps): for metric in metrics: - url = f'{PROM_URL}/api/v1/query_range' - params = { - 'query': metric, - 'start': start_str, - 'end' : end_str, - 'step' : '30s' # or any other step you need - } - response = requests.get(url, params=params) - if response.status_code == 200: - data = response.json() - for result in data['data']['result']: - for value in result['values']: - kpi_value = KpiValue( - kpi_id=metric, - timestamp=str(seconds=value[0]), - kpi_value_type=self._convert_value_to_kpi_value_type(value[1]) - ) - results.append(kpi_value) - - def _convert_value_to_kpi_value_type(self, value): + print(start_time, end_time, metric) + LOGGER.debug(">>> Query: {:}".format(metric)) + prom_response.append( + prom_connect.custom_query_range( + query = metric, # this is the metric name and label config + start_time = start_time, + end_time = end_time, + step = 30, # or any other step value (missing in gRPC Filter request) + ) + ) + + for single_resposne in prom_response: + # print ("{:}".format(single_resposne)) + for record in single_resposne: + # print("Record >>> kpi: {:} >>> time & values set: {:}".format(record['metric']['__name__'], record['values'])) + for value in record['values']: + # print("{:} - {:}".format(record['metric']['__name__'], value)) + kpi_value = KpiValue() + kpi_value.kpi_id.kpi_id = record['metric']['__name__'], + kpi_value.timestamp = value[0], + kpi_value.kpi_value_type = 
self.ConverValueToKpiValueType(value[1]) + response.kpi_value_list.append(kpi_value) + return response + + def GetKpiSampleType(self, kpi_value: str, kpi_manager_client): + print("--- START -----") + + kpi_id = KpiId() + kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid + # print("KpiId generated: {:}".format(kpi_id)) + + try: + kpi_descriptor_object = KpiDescriptor() + kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id) + # TODO: why kpi_descriptor_object recevies a KpiDescriptor type object not Empty type object??? + if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid: + LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) + print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) + return KpiSampleType.Name(kpi_descriptor_object.kpi_sample_type) # extract and return the name of KpiSampleType + else: + LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) + print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) + except Exception as e: + LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e)) + print ("Unable to get KpiDescriptor. Error: {:}".format(e)) + + def ConverValueToKpiValueType(self, value): # Check if the value is an integer (int64) try: - int64_value = int(value) - return KpiValueType(int64Val=int64_value) - except ValueError: + int_value = int(value) + return KpiValueType(int64Val=int_value) + except (ValueError, TypeError): pass # Check if the value is a float try: float_value = float(value) return KpiValueType(floatVal=float_value) - except ValueError: + except (ValueError, TypeError): pass # Check if the value is a boolean if value.lower() in ['true', 'false']: @@ -112,7 +155,6 @@ class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer): # If none of the above, treat it as a string return KpiValueType(stringVal=value) - def delivery_callback(self, err, msg): if err: LOGGER.debug('Message delivery failed: {:}'.format(err)) else: LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) diff --git a/src/kpi_value_api/tests/messages.py b/src/kpi_value_api/tests/messages.py index c2a1cbb0b275fb26d6498e4470f3869a105a8d36..d8ad14bd44eebc1e9412cfd5ff2973e6018c95e9 100644 --- a/src/kpi_value_api/tests/messages.py +++ b/src/kpi_value_api/tests/messages.py @@ -18,8 +18,9 @@ from common.proto.kpi_value_api_pb2 import KpiValue, KpiValueList def create_kpi_value_list(): _create_kpi_value_list = KpiValueList() - # To run this experiment sucessfully, already existing UUID in KPI DB in necessary. - # because the UUID is used to get the descriptor form KPI DB. + # To run this experiment sucessfully, add an existing UUID of a KPI Descriptor from the KPI DB. + # This UUID is used to get the descriptor form the KPI DB. If the Kpi ID does not exists, + # some part of the code won't execute. 
     EXISTING_KPI_IDs = ["725ce3ad-ac67-4373-bd35-8cd9d6a86e09", str(uuid.uuid4()), str(uuid.uuid4())]
diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml
index 25619ce7f8b4346172587dbf2e804896aff20e4d..9a2f9fd47e435b26e2e3a335bd9b95da58a0517f 100644
--- a/src/kpi_value_writer/.gitlab-ci.yml
+++ b/src/kpi_value_writer/.gitlab-ci.yml
@@ -50,10 +50,30 @@ unit_test kpi-value-writer:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
     - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
     - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
+    - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
     - docker container prune -f
   script:
     - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
-    - docker run --name $IMAGE_NAME -d -p 30030:30030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker pull "bitnami/zookeeper:latest"
+    - docker pull "bitnami/kafka:latest"
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 20 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP
+    - >
+      docker run --name $IMAGE_NAME -d -p 30030:30030
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
     - sleep 5
     - docker ps -a
     - docker logs $IMAGE_NAME
@@ -64,6 +84,8 @@ unit_test kpi-value-writer:
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
     - docker rm -f $IMAGE_NAME
+    - docker rm -f zookeeper
+    - docker rm -f kafka
     - docker network rm teraflowbridge
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
diff --git a/src/kpi_value_writer/README.md b/src/kpi_value_writer/README.md
index 72ba6e5594adeef4a29d650615716c26273ed115..c45a0e39534fae9efef4174d5ca5be7047845c48 100644
--- a/src/kpi_value_writer/README.md
+++ b/src/kpi_value_writer/README.md
@@ -1,29 +1,17 @@
-# How to locally run and test KPI manager micro-service
+# How to locally run and test the KPI Value Writer micro-service
 
-## --- File links need to be updated. ---
 ### Pre-requisets
-The following requirements should be fulfilled before the execuation of KPI management service.
+Ensure the following requirements are met before executing the KPI Value Writer service:
 
-1. Verify that [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and grpcs file are generated sucessfully.
-2. Virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) are installed sucessfully.
-3. Verify the creation of required database and table.
-[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file enlist the functions to create tables and database and
-[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment.
+1. The KPI Manager and KPI Value API services are running, and Apache Kafka is running.
 
-### Messages format templates
-["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file enlist the basic gRPC messages format used during the testing.
-
-### Test file
-["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file enlist different tests conducted during the experiment.
-
-### Flow of execution (Kpi Maanager Service functions)
-1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table.
+2. A virtual environment exists with all the required packages listed in the ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/requirements.in) file installed successfully.
 
-2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
-
-3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB.
+### Messages format templates
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_messages.py) Python file contains the templates to create gRPC messages.
+
+### Unit test file
+The ["KPI Value Writer test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_kpi_value_writer.py) Python file lists the various tests conducted to validate functionality.
 
-4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
-
-## For KPI composer and KPI writer
-The functionalities of KPI composer and writer is heavily dependent upon Telemetery service. Therfore, these services has other pre-requsites that are mention [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
\ No newline at end of file
+### Flow of execution
+1. Call the `RunKafkaConsumer` method from the `KpiValueWriter` class to start consuming the `KPI Value` generated by the `KPI Value API` or `Telemetry`. For every valid `KPI Value` consumed from Kafka, it invokes the `MetricWriterToPrometheus` class to prepare and push the metric to the Prometheus DB.
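For illustration only, the sketch below shows this consume-and-expose flow as a minimal standalone script; it is not the service code from this patch. It assumes a reachable Kafka broker at `127.0.0.1:9092`, uses a literal topic name `topic_value` as a stand-in for `KafkaTopic.VALUE`, expects the JSON payload produced by the KPI Value API above (keys `kpi_uuid`, `timestamp`, `kpi_value_type`), and uses a simplified placeholder gauge name and label set.

```python
import json
from confluent_kafka import Consumer
from prometheus_client import Gauge, start_http_server

# Assumptions (illustrative only): broker address, topic name, gauge name/labels.
KAFKA_ADDRESS   = "127.0.0.1:9092"
KPI_VALUE_TOPIC = "topic_value"   # stand-in for KafkaTopic.VALUE

def run():
    # Expose /metrics so Prometheus can scrape the values we write.
    start_http_server(10808)
    gauge = Gauge("tfs_kpi_value", "Latest KPI value consumed from Kafka", ["kpi_uuid"])

    consumer = Consumer({
        "bootstrap.servers": KAFKA_ADDRESS,
        "group.id"         : "kpi-value-writer-sketch",
        "auto.offset.reset": "latest",
    })
    consumer.subscribe([KPI_VALUE_TOPIC])

    while True:
        msg = consumer.poll(1.0)
        if msg is None or msg.error():
            continue
        # Payload format produced by the KPI Value API above:
        # {"kpi_uuid": ..., "timestamp": ..., "kpi_value_type": ...}
        kpi = json.loads(msg.value().decode("utf-8"))
        gauge.labels(kpi_uuid=kpi["kpi_uuid"]).set(float(kpi["kpi_value_type"]))

if __name__ == "__main__":
    run()
```

In the actual service, the KPI descriptor is first fetched from the KPI Manager and the metric name and labels are derived from it, as shown in the `KpiValueWriter.py` and `MetricWriterToPrometheus.py` changes below.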
diff --git a/src/kpi_value_writer/service/KpiValueWriter.py b/src/kpi_value_writer/service/KpiValueWriter.py index 26bab44657606b1f3edc14659d128c5ccc7a6890..8b258a1424cc44be4dcb9134ee913c707cc44bfa 100644 --- a/src/kpi_value_writer/service/KpiValueWriter.py +++ b/src/kpi_value_writer/service/KpiValueWriter.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import logging import threading from common.tools.kafka.Variables import KafkaConfig, KafkaTopic @@ -33,32 +34,30 @@ from .MetricWriterToPrometheus import MetricWriterToPrometheus LOGGER = logging.getLogger(__name__) ACTIVE_CONSUMERS = [] -METRIC_WRITER = MetricWriterToPrometheus() class KpiValueWriter(GenericGrpcService): def __init__(self, cls_name : str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.KPIVALUEWRITER) super().__init__(port, cls_name=cls_name) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'KpiValueWriter', + 'auto.offset.reset' : 'latest'}) - @staticmethod - def RunKafkaConsumer(): - thread = threading.Thread(target=KpiValueWriter.KafkaConsumer, args=()) + def RunKafkaConsumer(self): + thread = threading.Thread(target=self.KafkaKpiConsumer, args=()) ACTIVE_CONSUMERS.append(thread) thread.start() - @staticmethod - def KafkaConsumer(): - kafka_consumer = KafkaConsumer( - { 'bootstrap.servers' : KafkaConfig.SERVER_IP.value, - 'group.id' : __class__, - 'auto.offset.reset' : 'latest'} - ) + def KafkaKpiConsumer(self): kpi_manager_client = KpiManagerClient() - kafka_consumer.subscribe([KafkaTopic.VALUE.value]) + metric_writer = MetricWriterToPrometheus() + + consumer = self.kafka_consumer + consumer.subscribe([KafkaTopic.VALUE.value]) LOGGER.debug("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value)) print("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value)) while True: - raw_kpi = kafka_consumer.poll(1.0) + raw_kpi = consumer.poll(1.0) if raw_kpi is None: continue elif raw_kpi.error(): @@ -68,33 +67,29 @@ class KpiValueWriter(GenericGrpcService): print("Consumer error: {}".format(raw_kpi.error())) continue try: - kpi_value = KpiValue() - kpi_value.ParseFromString(raw_kpi.value()) + kpi_value = json.loads(raw_kpi.value().decode('utf-8')) LOGGER.info("Received KPI : {:}".format(kpi_value)) print("Received KPI : {:}".format(kpi_value)) - KpiValueWriter.get_kpi_descriptor(kpi_value, kpi_manager_client) + self.get_kpi_descriptor(kpi_value, kpi_manager_client, metric_writer) except Exception as e: print("Error detail: {:}".format(e)) continue - @staticmethod - def get_kpi_descriptor(kpi_value: str, kpi_manager_client ): + def get_kpi_descriptor(self, kpi_value: str, kpi_manager_client, metric_writer): print("--- START -----") kpi_id = KpiId() - kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid + kpi_id.kpi_id.uuid = kpi_value['kpi_uuid'] print("KpiId generated: {:}".format(kpi_id)) # print("Kpi manger client created: {:}".format(kpi_manager_client)) - try: kpi_descriptor_object = KpiDescriptor() kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id) + # TODO: why kpi_descriptor_object recevies a KpiDescriptor type object not Empty type object??? 
if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid: - # print("kpi descriptor received: {:}".format(kpi_descriptor_object)) - # if isinstance (kpi_descriptor_object, KpiDescriptor): LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object)) - METRIC_WRITER.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value) + metric_writer.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value) else: LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id)) diff --git a/src/kpi_value_writer/service/MetricWriterToPrometheus.py b/src/kpi_value_writer/service/MetricWriterToPrometheus.py index b681164786bd310d457998bae55b836522888b94..85e618a4b5b330cb83cf255652e7be8dff2dabd3 100644 --- a/src/kpi_value_writer/service/MetricWriterToPrometheus.py +++ b/src/kpi_value_writer/service/MetricWriterToPrometheus.py @@ -14,11 +14,9 @@ # read Kafka stream from Kafka topic -import ast -import time -import threading import logging -from prometheus_client import start_http_server, Gauge, CollectorRegistry +from typing import Dict +from prometheus_client import Gauge from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.proto.kpi_value_api_pb2 import KpiValue @@ -26,7 +24,6 @@ from common.proto.kpi_manager_pb2 import KpiDescriptor LOGGER = logging.getLogger(__name__) PROM_METRICS = {} -PROM_REGISTERY = CollectorRegistry() class MetricWriterToPrometheus: ''' @@ -34,13 +31,7 @@ class MetricWriterToPrometheus: cooked KPI value = KpiDescriptor (gRPC message) + KpiValue (gRPC message) ''' def __init__(self): - # prometheus server address and configs - self.start_prometheus_client() pass - - def start_prometheus_client(self): - start_http_server(10808, registry=PROM_REGISTERY) - LOGGER.debug("Prometheus client is started on port 10808") def merge_kpi_descriptor_and_kpi_value(self, kpi_descriptor, kpi_value): # Creating a dictionary from the kpi_descriptor's attributes @@ -54,25 +45,24 @@ class MetricWriterToPrometheus: 'slice_id' : kpi_descriptor.slice_id.slice_uuid.uuid, 'connection_id' : kpi_descriptor.connection_id.connection_uuid.uuid, 'link_id' : kpi_descriptor.link_id.link_uuid.uuid, - 'time_stamp' : kpi_value.timestamp.timestamp, - 'kpi_value' : kpi_value.kpi_value_type.floatVal + 'time_stamp' : kpi_value['timestamp'], + 'kpi_value' : kpi_value['kpi_value_type'] } # LOGGER.debug("Cooked Kpi: {:}".format(cooked_kpi)) return cooked_kpi - def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: KpiValue): + def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: Dict): # merge both gRPC messages into single varible. 
cooked_kpi = self.merge_kpi_descriptor_and_kpi_value(kpi_descriptor, kpi_value) - tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'} # extracted values will be used as metric tag - metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude] + tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'} + metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude] # These values will be used as metric tags metric_name = cooked_kpi['kpi_sample_type'] try: if metric_name not in PROM_METRICS: # Only register the metric, when it doesn't exists PROM_METRICS[metric_name] = Gauge ( metric_name, cooked_kpi['kpi_description'], - metric_tags, - registry=PROM_REGISTERY + metric_tags ) LOGGER.debug("Metric is created with labels: {:}".format(metric_tags)) PROM_METRICS[metric_name].labels( @@ -84,7 +74,7 @@ class MetricWriterToPrometheus: connection_id = cooked_kpi['connection_id'], link_id = cooked_kpi['link_id'], time_stamp = cooked_kpi['time_stamp'], - ).set(float(cooked_kpi['kpi_value'])) + ).set(cooked_kpi['kpi_value']) LOGGER.debug("Metric pushed to the endpoints: {:}".format(PROM_METRICS[metric_name])) except ValueError as e: @@ -93,4 +83,5 @@ class MetricWriterToPrometheus: print("Metric {:} is already registered. Skipping.".format(metric_name)) else: LOGGER.error("Error while pushing metric: {}".format(e)) - raise \ No newline at end of file + raise + diff --git a/src/kpi_value_writer/service/__main__.py b/src/kpi_value_writer/service/__main__.py index aa67540fb899781297d1235dc2e15bcbb2c38585..be9f8f29bfdb2397eedd0ce2821c5da8f778cfc4 100644 --- a/src/kpi_value_writer/service/__main__.py +++ b/src/kpi_value_writer/service/__main__.py @@ -13,6 +13,7 @@ # limitations under the License. import logging, signal, sys, threading +from prometheus_client import start_http_server from kpi_value_writer.service.KpiValueWriter import KpiValueWriter from common.Settings import get_log_level @@ -38,6 +39,8 @@ def main(): grpc_service = KpiValueWriter() grpc_service.start() + start_http_server(10808) + LOGGER.debug("Prometheus client is started on port 10808") # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=1.0): pass diff --git a/src/kpi_value_writer/tests/test_kpi_value_writer.py b/src/kpi_value_writer/tests/test_kpi_value_writer.py index 572495d48d70cdc40c0ef6bb1efcf877e2a610ee..b784fae5da713f9bd7cd7a1668f48b080f7a84fa 100755 --- a/src/kpi_value_writer/tests/test_kpi_value_writer.py +++ b/src/kpi_value_writer/tests/test_kpi_value_writer.py @@ -14,31 +14,12 @@ import logging from kpi_value_writer.service.KpiValueWriter import KpiValueWriter + from common.tools.kafka.Variables import KafkaTopic -from kpi_manager.client.KpiManagerClient import KpiManagerClient -from kpi_manager.tests.test_messages import create_kpi_descriptor_request -from common.proto.kpi_manager_pb2 import KpiDescriptor -from kpi_value_writer.tests.test_messages import create_kpi_id_request -LOGGER = logging.getLogger(__name__) -# def test_GetKpiDescriptor(): -# LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") -# kpi_manager_client = KpiManagerClient() -# # adding KPI -# LOGGER.info(" --->>> calling SetKpiDescriptor ") -# response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) -# # get KPI -# LOGGER.info(" --->>> calling GetKpiDescriptor with response ID") -# response = kpi_manager_client.GetKpiDescriptor(response_id) -# LOGGER.info("Response gRPC message object: {:}".format(response)) - -# LOGGER.info(" --->>> calling 
GetKpiDescriptor with random ID") -# rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request()) -# LOGGER.info("Response gRPC message object: {:}".format(rand_response)) -# LOGGER.info("\n------------------ TEST FINISHED ---------------------\n") -# assert isinstance(response, KpiDescriptor) +LOGGER = logging.getLogger(__name__) # -------- Initial Test ---------------- def test_validate_kafka_topics(): @@ -48,5 +29,5 @@ def test_validate_kafka_topics(): def test_KafkaConsumer(): LOGGER.debug(" --->>> test_kafka_consumer: START <<<--- ") - KpiValueWriter.RunKafkaConsumer() - + kpi_value_writer = KpiValueWriter() + kpi_value_writer.RunKafkaConsumer() diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py index a7404b924a44e9125dbf84bdcdfab3b9af790e5d..2282de557c1a80227c7d50e7c125ab4fe538bd28 100644 --- a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py @@ -50,4 +50,4 @@ class Hardware(Resource): LOGGER.exception(MSG.format(str(device_uuid))) response = jsonify({'error': str(e)}) response.status_code = HTTP_SERVERERROR - return response \ No newline at end of file + return response diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/HardwareMultipleDevices.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/HardwareMultipleDevices.py new file mode 100644 index 0000000000000000000000000000000000000000..b1beff518bb3997fc04a79e78c3467b47bd51483 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/HardwareMultipleDevices.py @@ -0,0 +1,50 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from flask import request +from flask.json import jsonify +from flask_restful import Resource +from common.proto.context_pb2 import Empty +from context.client.ContextClient import ContextClient +from ..tools.Authentication import HTTP_AUTH +from ..tools.HttpStatusCodes import HTTP_OK, HTTP_SERVERERROR +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class HardwareMultipleDevices(Resource): + @HTTP_AUTH.login_required + def get(self): + + LOGGER.debug('Request: {:s}'.format(str(request))) + + try: + context_client = ContextClient() + list_devices = context_client.ListDevices(Empty()) + LOGGER.info('Request: {:s}'.format(str(list_devices))) + hardware_list_reply = [] + yang_handler = YangHandler() + for device in list_devices.devices: + hardware_reply = yang_handler.compose(device) + hardware_list_reply.append(hardware_reply) + + yang_handler.destroy() + response = jsonify(hardware_list_reply) + response.status_code = HTTP_OK + except Exception as e: # pylint: disable=broad-except + MSG = 'Something went wrong Retrieving Hardware of Devices({:s})' + response = jsonify({'error': str(e)}) + response.status_code = HTTP_SERVERERROR + return response diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py index 4534294857e0c7972f4764927fe6d23bc656ad5f..7662261e97b35958f036dc0e69913af7947b9403 100644 --- a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py @@ -12,19 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -import libyang, os from common.proto.context_pb2 import Device from typing import Dict, Optional +import datetime import json import logging +import libyang +import os import re -import datetime LOGGER = logging.getLogger(__name__) YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang') YANG_MODULES = [ 'iana-hardware', - 'ietf-hardware' + 'ietf-hardware', + 'ietf-network-hardware-inventory' ] class YangHandler: @@ -35,7 +37,7 @@ class YangHandler: self._yang_context.load_module(yang_module_name).feature_enable_all() def parse_to_dict(self, message : Dict) -> Dict: - yang_module = self._yang_context.get_module('ietf-hardware') + yang_module = self._yang_context.get_module('ietf-network-hardware-inventory') dnode : Optional[libyang.DNode] = yang_module.parse_data_dict( message, validate_present=True, validate=True, strict=True ) @@ -44,66 +46,54 @@ class YangHandler: dnode.free() return message - @staticmethod def convert_to_iso_date(date_str: str) -> Optional[str]: date_str = date_str.strip('"') - # Define the regex pattern for ISO 8601 date format pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[\+\-]\d{2}:\d{2})" - # Check if the input date string matches the pattern if re.match(pattern, date_str): - return date_str # Already in ISO format + return date_str else: try: - # Parse the input date string as a datetime object datetime_obj = datetime.datetime.strptime(date_str, "%Y-%m-%d") - # Convert to ISO format iso_date = datetime_obj.isoformat() + "Z" return iso_date except ValueError: - return None # Invalid date format - + return None def compose(self, device : Device) -> Dict: - # compose device iterating through the components - - hardware = self._yang_context.create_data_path('/ietf-hardware:hardware') + hardware = 
self._yang_context.create_data_path('/ietf-network-hardware-inventory:network-hardware-inventory') + network_elements = hardware.create_path('network-elements') + + network_element = network_elements.create_path('network-element[uuid="{:s}"]'.format(device.device_id.device_uuid.uuid)) + network_element.create_path('uuid', device.device_id.device_uuid.uuid) + network_element.create_path('name', device.name) + components = network_element.create_path('components') physical_index = 1 - + for component in device.components: attributes = component.attributes - - component_new = hardware.create_path('component[name="{:s}"]'.format(component.name)) + component_new = components.create_path('component[uuid="{:s}"]'.format(component.component_uuid.uuid)) component_new.create_path('name', component.name) - - #Cambiar las clases especiales, su formato y añadir isfru component_type = component.type if component_type == "TRANSCEIVER" : component_type = "module" - if component_type == "FRU" : component_type = "slack" - component_new.create_path('is-fru', True) - else : - component_new.create_path('is-fru', False) - + component_type = component_type.replace("_", "-").lower() component_type = 'iana-hardware:' + component_type - component_new.create_path('class', component_type) - - #Añadir resto de atributos en IETF - physical_index += 1 - component_new.create_path('physical-index', physical_index) - - component_new.create_path('description', attributes["description"]) - - component_new.create_path('parent', component.parent) - + component_new.create_path('description', attributes["description"].replace('/"',"")) + if "CHASSIS" not in component.type: + parent_component_references = component_new.create_path('parent-component-references') + parent = parent_component_references.create_path('component-reference[index="{:d}"]'.format(physical_index)) + for component_parent in device.components: + if component.parent == component_parent.name : + parent.create_path('uuid', component_parent.component_uuid.uuid) + break if attributes["mfg-date"] != "": mfg_date = self.convert_to_iso_date(attributes["mfg-date"]) - LOGGER.info('component[name="{:s}"]'.format(attributes["mfg-date"])) component_new.create_path('mfg-date', mfg_date) component_new.create_path('hardware-rev', attributes["hardware-rev"]) @@ -111,22 +101,31 @@ class YangHandler: component_new.create_path('firmware-rev', attributes["firmware-version"]) component_new.create_path('serial-num', attributes["serial-num"]) component_new.create_path('mfg-name', attributes["mfg-name"]) + if attributes["removable"]: + removable = attributes["removable"].lower() + if 'true' in removable: + component_new.create_path('is-fru', True) + elif 'false' in removable: + component_new.create_path('is-fru', False) + if attributes["id"]: - component_new.create_path('parent-rel-pos', attributes["id"]) - - component_new.create_path('uri', component.name) - + try: + if "CHASSIS" in component.type : + component_new.create_path('parent-rel-pos', 0) + else: + parent_rel_pos = int(attributes["id"].replace("\"", "")) + component_new.create_path('parent-rel-pos', parent_rel_pos) + except ValueError: + LOGGER.info('ERROR:{:s} '.format(component.name )) + continue + component_new.create_path('uri', component.name) component_new.create_path('uuid', component.component_uuid.uuid) - - contains_child = [] - for component2 in device.components: - if component.name == component2.parent : - contains_child.append(component2.name) - - component_new.create_path('contains-child', contains_child) + 
for child in device.components: + if component.name == child.parent : + component_new.create_path('contained-child', child.component_uuid.uuid) return json.loads(hardware.print_mem('json')) - + def destroy(self) -> None: - self._yang_context.destroy() \ No newline at end of file + self._yang_context.destroy() diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py index 4a0cedcc4df9a198621e2585bafef1768ad1f8ca..ba774650e89e26609573a364be520c2d1bd6df84 100644 --- a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py @@ -13,9 +13,12 @@ # limitations under the License. from nbi.service.rest_server.nbi_plugins.ietf_hardware.Hardware import Hardware +from nbi.service.rest_server.nbi_plugins.ietf_hardware.HardwareMultipleDevices import HardwareMultipleDevices from nbi.service.rest_server.RestServer import RestServer -URL_PREFIX = "/restconf/data/device=<path:device_uuid>/ietf-hardware:hardware" +URL_PREFIX_DEVICE = "/restconf/data/device=<path:device_uuid>/ietf-network-hardware-inventory:network-hardware-inventory" +URL_PREFIX_HARDWARE = "/restconf/data/ietf-network-hardware-inventory:network-hardware-inventory" def register_ietf_hardware(rest_server: RestServer): - rest_server.add_resource(Hardware, URL_PREFIX) \ No newline at end of file + rest_server.add_resource(Hardware, URL_PREFIX_DEVICE) + rest_server.add_resource(HardwareMultipleDevices, URL_PREFIX_HARDWARE) diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-network-hardware-inventory@2023-03-09.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-network-hardware-inventory@2023-03-09.yang new file mode 100644 index 0000000000000000000000000000000000000000..e074e3005e97f9657f7ef23a39741d3ce4b912b8 --- /dev/null +++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-network-hardware-inventory@2023-03-09.yang @@ -0,0 +1,604 @@ +module ietf-network-hardware-inventory { + yang-version 1.1; + namespace + "urn:ietf:params:xml:ns:yang:ietf-network-hardware-inventory"; + prefix nhi; + + import ietf-yang-types { + prefix yang; + reference + "RFC6991: Common YANG Data Types."; + } + + import iana-hardware { + prefix ianahw; + reference + "https://www.iana.org/assignments/yang-parameters"; + } + + import ietf-inet-types { + prefix inet; + reference + "RFC6991: Common YANG Data Types."; + } + + organization + "IETF CCAMP Working Group"; + contact + "WG Web: <https://datatracker.ietf.org/wg/ccamp/> + WG List: <mailto:ccamp@ietf.org> + + Editor: Chaode Yu + <yuchaode@huawei.com> + + Editor: Italo Busi + <italo.busi@huawei.com> + + Editor: Aihua Guo + <aihuaguo.ietf@gmail.com> + + Editor: Sergio Belotti + <sergio.belotti@nokia.com> + + Editor: Jean-Francois Bouquier + <jeff.bouquier@vodafone.com> + + Editor: Fabio Peruzzini + <fabio.peruzzini@telecomitalia.it>"; + + description + "This module defines a model for retrieving network hardware + inventory. + + The model fully conforms to the Network Management + Datastore Architecture (NMDA). + Copyright (c) 2022 IETF Trust and the persons + identified as authors of the code. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices. + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here."; + + // RFC Ed.: replace XXXX with actual RFC number and remove this + // note. + // RFC Ed.: update the date below with the date of RFC publication + // and remove this note. + + revision 2023-03-09 { + description + "Initial version"; + reference + "RFC XXXX: A YANG Data Model for Network Hardware Inventory."; + //RFC Editor: replace XXXX with actual RFC number, update date + //information and remove this note + } + + container network-hardware-inventory { + config false; + description + "The top-level container for the network inventory + information."; + uses equipment-rooms-grouping; + uses network-elements-grouping; + } + + grouping common-entity-attributes { + description + "A set of attributes which are common to all the entities + (e.g., component, equipment room) defined in this module."; + leaf uuid { + type yang:uuid; + description + "Uniquely identifies an entity (e.g., component)."; + } + leaf name { + type string; + description + "A name for an entity (e.g., component), as specified by + a network manager, that provides a non-volatile 'handle' + for the entity and that can be modified anytime during the + entity lifetime. + + If no configured value exists, the server MAY set the value + of this node to a locally unique value in the operational + state."; + } + leaf description { + type string; + description "a textual description of inventory object"; + } + leaf alias { + type string; + description + "a alias name of inventory objects. This alias name can be + specified by network manager."; + } + } + + grouping network-elements-grouping { + description + "The attributes of the network elements."; + container network-elements { + description + "The container for the list of network elements."; + list network-element { + key uuid; + description + "The list of network elements within the network."; + uses common-entity-attributes; + container ne-location { + description + "The location information of this network element."; + leaf-list equipment-room-name { + type leafref { + path "/nhi:network-hardware-inventory/" + + "nhi:equipment-rooms/nhi:equipment-room/nhi:name"; + } + description + "Names of equipment rooms where the NE is located. 
+ Please note that a NE could be located in several + equipment rooms."; + } + } + uses ne-specific-info-grouping; + uses components-grouping; + } + } + } + + grouping ne-specific-info-grouping { + description + "Attributes applicable to network elements."; + leaf hardware-rev { + type string; + description + "The vendor-specific hardware revision string for the NE."; + } + leaf software-rev { + type string; + description + "The vendor-specific software revision string for the NE."; + } + leaf mfg-name { + type string; + description "The name of the manufacturer of this NE"; + } + leaf mfg-date { + type yang:date-and-time; + description "The date of manufacturing of the NE."; + } + leaf part-number { + type string; + description + "The vendor-specific model name identifier string associated + with this NE. The preferred value is the customer-visible + part number, which may be printed on the NE itself."; + } + leaf serial-number { + type string; + description + "The vendor-specific serial number string for the NE"; + } + leaf product-name { + type string; + description + "indicates the vendor-spefic device type infomation."; + } + } + + grouping equipment-rooms-grouping { + description + "The attributes of the equipment rooms."; + container equipment-rooms { + description + "The container for the list of equipment rooms."; + list equipment-room { + key uuid; + description + "The list of equipment rooms within the network."; + uses common-entity-attributes; + leaf location { + type string; + description + "compared with the location information of the other + inventory objects, a GIS address is preferred for + equipment room"; + } + container racks { + description + "Top level container for the list of racks."; + list rack { + key uuid; + description + "The list of racks within an equipment room."; + uses common-entity-attributes; + uses rack-specific-info-grouping; + list contained-chassis { + key "ne-ref component-ref"; + description + "The list of chassis within a rack."; + leaf ne-ref { + type leafref { + path "/nhi:network-hardware-inventory" + + "/nhi:network-elements/nhi:network-element" + + "/nhi:uuid"; + } + description + "The reference to the network element containing + the chassis component."; + } + leaf component-ref { + type leafref { + path "/nhi:network-hardware-inventory" + + "/nhi:network-elements/nhi:network-element" + + "[nhi:uuid=current()/../ne-ref]/nhi:components" + + "/nhi:component/nhi:uuid"; + } + description + "The reference to the chassis component within + the network element and contained by the rack."; + } + leaf relative-position { + type uint8; + description "A relative position of chassis within + the rack"; + } + } + } + } + } + } + } + + grouping rack-specific-info-grouping { + description + "Attributes applicable to racks only."; + container rack-location { + description + "The location information of the rack, which comprises the + name of the equipment room, row number, and column number."; + leaf equipment-room-name { + type leafref { + path "/nhi:network-hardware-inventory/nhi:equipment-rooms" + + "/nhi:equipment-room/nhi:name"; + } + description + "Name of equipment room where this rack is located."; + } + leaf row-number { + type uint32; + description + "Identifies the row within the equipment room where + the rack is located."; + } + leaf column-number { + type uint32; + description + "Identifies the physical location of the rack within + the column."; + } + } + leaf height { + type uint16; + units millimeter; + description + "Rack height."; + } + 
leaf width { + type uint16; + units millimeter; + description + "Rack width."; + } + leaf depth { + type uint16; + units millimeter; + description + "Rack depth."; + } + leaf max-voltage { + type uint16; + units volt; + description + "The maximum voltage could be supported by the rack."; + } + } + + grouping components-grouping { + description + "The attributes of the hardware components."; + container components { + description + "The container for the list of components."; + list component { + key uuid; + description + "The list of components within a network element."; + uses common-entity-attributes; + leaf location { + type string; + description + "A relative location information of this component. + In optical transport network, the location string is + using the following pattern: + '/ne=<nw-ne-name>[/r=<r_index>][/sh=<sh_index> + [/s_sh=<s_sh_index> ...]][[/sl=<sl_index> + [/s_sl=<s_sl_index> ...]][/p=<p_index> …]]' + "; + } + leaf class { + type identityref { + base ianahw:hardware-class; + } + description + "An indication of the general hardware type of the + component."; + reference + "RFC 8348: A YANG Data Model for Hardware Management."; + } + leaf-list contained-child { + type leafref { + path "../nhi:uuid"; + } + description + "The list of the identifiers of the child components + physically contained within this component."; + } + leaf parent-rel-pos { + type int32 { + range "0 .. 2147483647"; + } + description + "The relative position with respect to the parent + component among all the sibling components."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalParentRelPos"; + } + + container parent-component-references { + description + "The top level container for the list of the + identifiers of the parents of this component in a + hierarchy."; + list component-reference { + key index; + description + "The list of the identifiers of the parents of this + component in a hierarchy. + + The index parameter defines the hierarchy: the topmost + parent has an index of 0."; + leaf index { + type uint8; + description + "The index of the parent with respect to the + hierarchy."; + } + leaf class { + type leafref { + path "../../../nhi:class"; + } + description + "Class of the hierarchial parent component."; + } + leaf uuid { + type leafref { + path "../../../nhi:uuid"; + } + description + "The identifier of the parent's component in the + hierarchy."; + } + } + } + + leaf hardware-rev { + type string; + description + "The vendor-specific hardware revision string for the + component. The preferred value is the hardware revision + identifier actually printed on the component itself (if + present)."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalHardwareRev"; + } + leaf firmware-rev { + type string; + description + "The vendor-specific firmware revision string for the + component."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalFirmwareRev"; + } + leaf software-rev { + type string; + description + "The vendor-specific software revision string for the + component."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalSoftwareRev"; + } + leaf serial-num { + type string; + description + "The vendor-specific serial number string for the + component. 
The preferred value is the serial number + string actually printed on the component itself (if + present)."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalSerialNum"; + } + leaf mfg-name { + type string; + description + "The name of the manufacturer of this physical component. + The preferred value is the manufacturer name string + actually printed on the component itself (if present). + + Note that comparisons between instances of the + 'model-name', 'firmware-rev', 'software-rev', and + 'serial-num' nodes are only meaningful amongst + components with the same value of 'mfg-name'. + + If the manufacturer name string associated with the + physical component is unknown to the server, then this + node is not instantiated."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgName"; + } + leaf part-number { + type string; + description + "The vendor-specific model name identifier string + associated with this physical component. The preferred + value is the customer-visible part number, which may be + printed on the component itself. + + If the model name string associated with the physical + component is unknown to the server, then this node is + not instantiated."; + reference + "RFC 6933: Entity MIB (Version 4) - + entPhysicalModelName"; + } + leaf asset-id { + type string; + description + "This node is a user-assigned asset tracking identifier + for the component. + + A server implementation MAY map this leaf to the + entPhysicalAssetID MIB object. Such an implementation + needs to use some mechanism to handle the differences in + size and characters allowed between this leaf and + entPhysicalAssetID. The definition of such a mechanism + is outside the scope of this document."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalAssetID"; + } + leaf is-fru { + type boolean; + description + "This node indicates whether or not this component is + considered a 'field-replaceable unit' by the vendor. If + this node contains the value 'true', then this component + identifies a field-replaceable unit. For all components + that are permanently contained within a + field-replaceable unit, the value 'false' should be + returned for this node."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalIsFRU"; + } + leaf mfg-date { + type yang:date-and-time; + description + "The date of manufacturing of the managed component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgDate"; + } + leaf-list uri { + type inet:uri; + description + "This node contains identification information about the + component."; + reference + "RFC 6933: Entity MIB (Version 4) - entPhysicalUris"; + } + uses component-specific-info-grouping; + } + } + } + + grouping component-specific-info-grouping { + description + "In case if there are some missing attributes of component not + defined by RFC8348. These attributes could be + component-specific. + Here we provide a extension structure for all the components + we recognized. 
We will enrich these component specifc + containers in the future."; + choice component-class { + description + "This extension differs between different component + classes."; + case chassis { + when "./class = 'ianahw:chassis'"; + container chassis-specific-info { + description + "This container contains some attributes belong to + chassis only."; + uses chassis-specific-info-grouping; + } + } + case container { + when "./class = 'ianahw:container'"; + container slot-specific-info { + description + "This container contains some attributes belong to + slot or sub-slot only."; + uses slot-specific-info-grouping; + } + } + case module { + when "./nhi:class = 'ianahw:module'"; + container board-specific-info { + description + "This container contains some attributes belong to + board only."; + uses board-specific-info-grouping; + } + } + case port { + when "./nhi:class = 'ianahw:port'"; + container port-specific-info { + description + "This container contains some attributes belong to + port only."; + uses port-specific-info-grouping; + } + } + //TO BE ADDED: transceiver + } + } + + grouping chassis-specific-info-grouping { + //To be enriched in the future. + description + "Specific attributes applicable to chassis only."; + } + + grouping slot-specific-info-grouping { + //To be enriched in the future. + description + "Specific attributes applicable to slots only."; + } + + grouping board-specific-info-grouping { + //To be enriched in the future. + description + "Specific attributes applicable to boards only."; + } + + grouping port-specific-info-grouping { + //To be enriched in the future. + description + "Specific attributes applicable to ports only."; + } +} diff --git a/src/telemetry/.gitlab-ci.yml b/src/telemetry/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..110a6490d20558c6589550be45b6432e500ba9d6 --- /dev/null +++ b/src/telemetry/.gitlab-ci.yml @@ -0,0 +1,203 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build telemetry: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + # This first build tags the builder resulting image to prevent being removed by dangling image removal command + # - docker buildx build -t "${IMAGE_NAME}-backend:${IMAGE_TAG}-builder" --target builder -f ./src/$IMAGE_NAME/backend/Dockerfile . + - docker buildx build -t "${IMAGE_NAME}-frontend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/frontend/Dockerfile . + - docker buildx build -t "${IMAGE_NAME}-backend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/backend/Dockerfile . 
+ - docker tag "${IMAGE_NAME}-frontend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker tag "${IMAGE_NAME}-backend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/.gitlab-ci.yml + - src/$IMAGE_NAME/frontend/**/*.{py,in,yml} + - src/$IMAGE_NAME/frontend/Dockerfile + - src/$IMAGE_NAME/frontend/tests/*.py + - src/$IMAGE_NAME/backend/Dockerfile + - src/$IMAGE_NAME/backend/**/*.{py,in,yml} + - src/$IMAGE_NAME/backend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + +# Apply unit test to the component +unit_test telemetry-backend: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build telemetry + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + # - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi + - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + - > + docker run --name $IMAGE_NAME-backend -d -p 30060:30060 + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/backend/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG + - docker ps -a + - sleep 5 + - docker logs ${IMAGE_NAME}-backend + - > + docker exec -i ${IMAGE_NAME}-backend bash -c + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-backend_report.xml $IMAGE_NAME/backend/tests/test_*.py" + - docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + 
after_script: + - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force + - docker rm -f ${IMAGE_NAME}-backend + - docker rm -f zookeeper + - docker rm -f kafka + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/backend/**/*.{py,in,yml} + - src/$IMAGE_NAME/backend/Dockerfile + - src/$IMAGE_NAME/backend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/backend/tests/${IMAGE_NAME}-backend_report.xml + +# Apply unit test to the component +unit_test telemetry-frontend: + variables: + IMAGE_NAME: 'telemetry' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build telemetry + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi + - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi + - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi + - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi + - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi + - docker container prune -f + script: + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" + - docker pull "bitnami/zookeeper:latest" + - docker pull "bitnami/kafka:latest" + - docker pull "cockroachdb/cockroach:latest-v22.2" + - docker volume create crdb + - > + docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 + --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 + --volume "crdb:/cockroach/cockroach-data" + cockroachdb/cockroach:latest-v22.2 start-single-node + - echo "Waiting for initialization..." + - while ! 
docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done + # - docker logs crdb + # - docker ps -a + - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $CRDB_ADDRESS + - > + docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 \ + -e ALLOW_ANONYMOUS_LOGIN=yes \ + bitnami/zookeeper:latest + - sleep 10 # Wait for Zookeeper to start + - docker run --name kafka -d --network=teraflowbridge -p 9092:9092 + --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + --env ALLOW_PLAINTEXT_LISTENER=yes + bitnami/kafka:latest + - sleep 20 # Wait for Kafka to start + - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $KAFKA_IP + # - docker logs zookeeper + # - docker logs kafka + - > + docker run --name $IMAGE_NAME-frontend -d -p 30050:30050 + --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" + --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" + --volume "$PWD/src/$IMAGE_NAME/frontend/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG + - docker ps -a + - sleep 5 + - docker logs ${IMAGE_NAME}-frontend + - > + docker exec -i ${IMAGE_NAME}-frontend bash -c + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_*.py" + - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + - docker volume rm -f crdb + - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force + - docker rm -f ${IMAGE_NAME}-frontend + - docker rm -f zookeeper + - docker rm -f kafka + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/frontend/**/*.{py,in,yml} + - src/$IMAGE_NAME/frontend/Dockerfile + - src/$IMAGE_NAME/frontend/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml \ No newline at end of file diff --git a/src/telemetry/backend/Dockerfile b/src/telemetry/backend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..eebfe24ab3ca457b9d05b02a07f4b28d6f196987 --- /dev/null +++ b/src/telemetry/backend/Dockerfile @@ -0,0 +1,69 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/telemetry/backend +WORKDIR /var/teraflow/telemetry/backend +COPY src/telemetry/backend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/telemetry/__init__.py telemetry/__init__.py +COPY src/telemetry/backend/. telemetry/backend/ + +# Start the service +ENTRYPOINT ["python", "-m", "telemetry.backend.service"] diff --git a/src/telemetry/frontend/tests/__init__.py b/src/telemetry/backend/requirements.in similarity index 96% rename from src/telemetry/frontend/tests/__init__.py rename to src/telemetry/backend/requirements.in index 3ee6f7071f145e06c3aeaefc09a43ccd88e619e3..e6a559be714faa31196206dbbdc53788506369b5 100644 --- a/src/telemetry/frontend/tests/__init__.py +++ b/src/telemetry/backend/requirements.in @@ -12,3 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. +confluent-kafka==2.3.* diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index d81be79dbe410ccbf2781816f34735f6bfe5639d..95662969be4f9191e5f3748490a6bc47167e6243 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -12,64 +12,56 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import ast +import json import time import random import logging -import requests import threading -from typing import Any, Tuple -from common.proto.context_pb2 import Empty +from typing import Any, Dict +# from common.proto.context_pb2 import Empty from confluent_kafka import Producer as KafkaProducer from confluent_kafka import Consumer as KafkaConsumer -from confluent_kafka import KafkaException from confluent_kafka import KafkaError -from confluent_kafka.admin import AdminClient, NewTopic -from common.proto.telemetry_frontend_pb2 import Collector, CollectorId -from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +from common.method_wrappers.Decorator import MetricsPool +from common.tools.service.GenericGrpcService import GenericGrpcService + + LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Telemetry', 'TelemetryBackend') -KAFKA_SERVER_IP = '127.0.0.1:9092' -# KAFKA_SERVER_IP = '10.152.183.175:30092' -ADMIN_KAFKA_CLIENT = AdminClient({'bootstrap.servers': KAFKA_SERVER_IP}) -KAFKA_TOPICS = {'request' : 'topic_request', 'response': 'topic_response', - 'raw' : 'topic_raw' , 'labeled' : 'topic_labeled'} -EXPORTER_ENDPOINT = "http://10.152.183.2:9100/metrics" -PRODUCER_CONFIG = {'bootstrap.servers': KAFKA_SERVER_IP,} +METRICS_POOL = MetricsPool('TelemetryBackend', 'backendService') +# EXPORTER_ENDPOINT = "http://10.152.183.2:9100/metrics" -class TelemetryBackendService: +class TelemetryBackendService(GenericGrpcService): """ - Class to listens for request on Kafka topic, fetches metrics and produces measured values to another Kafka topic. + Class listens for request on Kafka topic, fetches requested metrics from device. + Produces metrics on both RESPONSE and VALUE kafka topics. """ - - def __init__(self): + def __init__(self, cls_name : str = __name__) -> None: LOGGER.info('Init TelemetryBackendService') + port = get_service_port_grpc(ServiceNameEnum.TELEMETRYBACKEND) + super().__init__(port, cls_name=cls_name) + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'backend', + 'auto.offset.reset' : 'latest'}) self.running_threads = {} - - def run_kafka_listener(self)->bool: - threading.Thread(target=self.kafka_listener).start() - return True - - def kafka_listener(self): + + def install_servicers(self): + threading.Thread(target=self.RequestListener).start() + + def RequestListener(self): """ listener for requests on Kafka topic. 
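        Expected message (illustrative, inferred from the fields read below): the value is a
        JSON object of the form {"kpi_id": "<uuid>", "duration": <seconds>, "interval": <seconds>},
        keyed by the collector_id, consumed from KafkaTopic.REQUEST.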
""" - conusmer_configs = { - 'bootstrap.servers' : KAFKA_SERVER_IP, - 'group.id' : 'backend', - 'auto.offset.reset' : 'latest' - } - # topic_request = "topic_request" - consumerObj = KafkaConsumer(conusmer_configs) - # consumerObj.subscribe([topic_request]) - consumerObj.subscribe([KAFKA_TOPICS['request']]) - + consumer = self.kafka_consumer + consumer.subscribe([KafkaTopic.REQUEST.value]) while True: - receive_msg = consumerObj.poll(2.0) + receive_msg = consumer.poll(2.0) if receive_msg is None: - # print (time.time(), " - Telemetry backend is listening on Kafka Topic: ", KAFKA_TOPICS['request']) # added for debugging purposes continue elif receive_msg.error(): if receive_msg.error().code() == KafkaError._PARTITION_EOF: @@ -77,177 +69,175 @@ class TelemetryBackendService: else: print("Consumer error: {}".format(receive_msg.error())) break - (kpi_id, duration, interval) = ast.literal_eval(receive_msg.value().decode('utf-8')) + + collector = json.loads(receive_msg.value().decode('utf-8')) collector_id = receive_msg.key().decode('utf-8') - if duration == -1 and interval == -1: - self.terminate_collector_backend(collector_id) - # threading.Thread(target=self.terminate_collector_backend, args=(collector_id)) + LOGGER.debug('Recevied Collector: {:} - {:}'.format(collector_id, collector)) + print('Recevied Collector: {:} - {:}'.format(collector_id, collector)) + + if collector['duration'] == -1 and collector['interval'] == -1: + self.TerminateCollectorBackend(collector_id) else: - self.run_initiate_collector_backend(collector_id, kpi_id, duration, interval) + self.RunInitiateCollectorBackend(collector_id, collector) + def TerminateCollectorBackend(self, collector_id): + if collector_id in self.running_threads: + thread, stop_event = self.running_threads[collector_id] + stop_event.set() + thread.join() + print ("Terminating backend (by StopCollector): Collector Id: ", collector_id) + del self.running_threads[collector_id] + self.GenerateCollectorResponse(collector_id, "-1", -1) # Termination confirmation to frontend. + else: + print ('Backend collector {:} not found'.format(collector_id)) - def run_initiate_collector_backend(self, collector_id: str, kpi_id: str, duration: int, interval: int): + def RunInitiateCollectorBackend(self, collector_id: str, collector: str): stop_event = threading.Event() - thread = threading.Thread(target=self.initiate_collector_backend, - args=(collector_id, kpi_id, duration, interval, stop_event)) + thread = threading.Thread(target=self.InitiateCollectorBackend, + args=(collector_id, collector, stop_event)) self.running_threads[collector_id] = (thread, stop_event) thread.start() - def initiate_collector_backend(self, collector_id, kpi_id, duration, interval, stop_event - ): # type: ignore + def InitiateCollectorBackend(self, collector_id, collector, stop_event): """ - Method to receive collector request attribues and initiates collecter backend. + Method receives collector request and initiates collecter backend. """ print("Initiating backend for collector: ", collector_id) start_time = time.time() while not stop_event.is_set(): - if time.time() - start_time >= duration: # condition to terminate backend + if time.time() - start_time >= collector['duration']: # condition to terminate backend print("Execuation duration completed: Terminating backend: Collector Id: ", collector_id, " - ", time.time() - start_time) - self.generate_kafka_response(collector_id, "-1", -1) - # write to Kafka to send the termination confirmation. 
+ self.GenerateCollectorResponse(collector_id, "-1", -1) # Termination confirmation to frontend. break - # print ("Received KPI: ", kpi_id, ", Duration: ", duration, ", Fetch Interval: ", interval) - self.extract_kpi_value(collector_id, kpi_id) - # print ("Telemetry Backend running for KPI: ", kpi_id, "after FETCH INTERVAL: ", interval) - time.sleep(interval) + self.ExtractKpiValue(collector_id, collector['kpi_id']) + time.sleep(collector['interval']) - def extract_kpi_value(self, collector_id: str, kpi_id: str): + def ExtractKpiValue(self, collector_id: str, kpi_id: str): """ Method to extract kpi value. """ - measured_kpi_value = random.randint(1,100) # Should be extracted from exporter/stream - # measured_kpi_value = self.fetch_node_exporter_metrics() # exporter extracted metric value against default KPI - self.generate_kafka_response(collector_id, kpi_id , measured_kpi_value) + measured_kpi_value = random.randint(1,100) # TODO: To be extracted from a device + print ("Measured Kpi value: {:}".format(measured_kpi_value)) + # measured_kpi_value = self.fetch_node_exporter_metrics() # exporter extracted metric value against default KPI + self.GenerateCollectorResponse(collector_id, kpi_id , measured_kpi_value) - def generate_kafka_response(self, collector_id: str, kpi_id: str, kpi_value: Any): + def GenerateCollectorResponse(self, collector_id: str, kpi_id: str, measured_kpi_value: Any): """ - Method to write response on Kafka topic + Method to write kpi value on RESPONSE Kafka topic """ - # topic_response = "topic_response" - msg_value : Tuple [str, Any] = (kpi_id, kpi_value) - msg_key = collector_id - producerObj = KafkaProducer(PRODUCER_CONFIG) - # producerObj.produce(topic_response, key=msg_key, value= str(msg_value), callback=self.delivery_callback) - producerObj.produce(KAFKA_TOPICS['response'], key=msg_key, value= str(msg_value), callback=TelemetryBackendService.delivery_callback) - producerObj.flush() - - def terminate_collector_backend(self, collector_id): - if collector_id in self.running_threads: - thread, stop_event = self.running_threads[collector_id] - stop_event.set() - thread.join() - print ("Terminating backend (by StopCollector): Collector Id: ", collector_id) - del self.running_threads[collector_id] - self.generate_kafka_response(collector_id, "-1", -1) + producer = self.kafka_producer + kpi_value : Dict = { + "kpi_id" : kpi_id, + "kpi_value" : measured_kpi_value + } + producer.produce( + KafkaTopic.RESPONSE.value, + key = collector_id, + value = json.dumps(kpi_value), + callback = self.delivery_callback + ) + producer.flush() - def create_topic_if_not_exists(self, new_topics: list) -> bool: - """ - Method to create Kafka topic if it does not exist. - Args: - admin_client (AdminClient): Kafka admin client. - """ - for topic in new_topics: - try: - topic_metadata = ADMIN_KAFKA_CLIENT.list_topics(timeout=5) - if topic not in topic_metadata.topics: - # If the topic does not exist, create a new topic - print(f"Topic '{topic}' does not exist. Creating...") - LOGGER.warning("Topic {:} does not exist. Creating...".format(topic)) - new_topic = NewTopic(topic, num_partitions=1, replication_factor=1) - ADMIN_KAFKA_CLIENT.create_topics([new_topic]) - except KafkaException as e: - print(f"Failed to create topic: {e}") - return False - return True - - @staticmethod - def delivery_callback( err, msg): + def GenerateRawMetric(self, metrics: Any): """ - Callback function to handle message delivery status. - Args: - err (KafkaError): Kafka error object. 
- msg (Message): Kafka message object. + Method writes raw metrics on VALUE Kafka topic """ - if err: - print(f'Message delivery failed: {err}') - else: - print(f'Message delivered to topic {msg.topic()}') + producer = self.kafka_producer + some_metric : Dict = { + "some_id" : metrics + } + producer.produce( + KafkaTopic.VALUE.value, + key = 'raw', + value = json.dumps(some_metric), + callback = self.delivery_callback + ) + producer.flush() -# ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter ----------- - @staticmethod - def fetch_single_node_exporter_metric(): - """ - Method to fetch metrics from Node Exporter. - Returns: - str: Metrics fetched from Node Exporter. + def delivery_callback(self, err, msg): """ - KPI = "node_network_receive_packets_total" - try: - response = requests.get(EXPORTER_ENDPOINT) # type: ignore - LOGGER.info("Request status {:}".format(response)) - if response.status_code == 200: - # print(f"Metrics fetched sucessfully...") - metrics = response.text - # Check if the desired metric is available in the response - if KPI in metrics: - KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI) - # Extract the metric value - if KPI_VALUE is not None: - LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE)) - print(f"Extracted value of {KPI} is: {KPI_VALUE}") - return KPI_VALUE - else: - LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code)) - # print(f"Failed to fetch metrics. Status code: {response.status_code}") - return None - except Exception as e: - LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e)) - # print(f"Failed to fetch metrics: {str(e)}") - return None - - @staticmethod - def extract_metric_value(metrics, metric_name): - """ - Method to extract the value of a metric from the metrics string. - Args: - metrics (str): Metrics string fetched from Exporter. - metric_name (str): Name of the metric to extract. - Returns: - float: Value of the extracted metric, or None if not found. - """ - try: - # Find the metric line containing the desired metric name - metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name)) - # Split the line to extract the metric value - metric_value = float(metric_line.split()[1]) - return metric_value - except StopIteration: - print(f"Metric '{metric_name}' not found in the metrics.") - return None - - @staticmethod - def stream_node_export_metrics_to_raw_topic(): - try: - while True: - response = requests.get(EXPORTER_ENDPOINT) - # print("Response Status {:} ".format(response)) - # LOGGER.info("Response Status {:} ".format(response)) - try: - if response.status_code == 200: - producerObj = KafkaProducer(PRODUCER_CONFIG) - producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback) - producerObj.flush() - LOGGER.info("Produce to topic") - else: - LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code)) - print(f"Didn't received expected response. Status code: {response.status_code}") - return None - time.sleep(15) - except Exception as e: - LOGGER.info("Failed to process response. Status code: {:}".format(e)) - return None - except Exception as e: - LOGGER.info("Failed to fetch metrics. 
Status code: {:}".format(e)) - print(f"Failed to fetch metrics: {str(e)}") - return None -# ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter ----------- \ No newline at end of file + Callback function to handle message delivery status. + Args: err (KafkaError): Kafka error object. + msg (Message): Kafka message object. + """ + if err: print(f'Message delivery failed: {err}') + # else: print(f'Message delivered to topic {msg.topic()}') + +# # ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter ----------- +# @staticmethod +# def fetch_single_node_exporter_metric(): +# """ +# Method to fetch metrics from Node Exporter. +# Returns: +# str: Metrics fetched from Node Exporter. +# """ +# KPI = "node_network_receive_packets_total" +# try: +# response = requests.get(EXPORTER_ENDPOINT) # type: ignore +# LOGGER.info("Request status {:}".format(response)) +# if response.status_code == 200: +# # print(f"Metrics fetched sucessfully...") +# metrics = response.text +# # Check if the desired metric is available in the response +# if KPI in metrics: +# KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI) +# # Extract the metric value +# if KPI_VALUE is not None: +# LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE)) +# print(f"Extracted value of {KPI} is: {KPI_VALUE}") +# return KPI_VALUE +# else: +# LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code)) +# # print(f"Failed to fetch metrics. Status code: {response.status_code}") +# return None +# except Exception as e: +# LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e)) +# # print(f"Failed to fetch metrics: {str(e)}") +# return None + +# @staticmethod +# def extract_metric_value(metrics, metric_name): +# """ +# Method to extract the value of a metric from the metrics string. +# Args: +# metrics (str): Metrics string fetched from Exporter. +# metric_name (str): Name of the metric to extract. +# Returns: +# float: Value of the extracted metric, or None if not found. +# """ +# try: +# # Find the metric line containing the desired metric name +# metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name)) +# # Split the line to extract the metric value +# metric_value = float(metric_line.split()[1]) +# return metric_value +# except StopIteration: +# print(f"Metric '{metric_name}' not found in the metrics.") +# return None + +# @staticmethod +# def stream_node_export_metrics_to_raw_topic(): +# try: +# while True: +# response = requests.get(EXPORTER_ENDPOINT) +# # print("Response Status {:} ".format(response)) +# # LOGGER.info("Response Status {:} ".format(response)) +# try: +# if response.status_code == 200: +# producerObj = KafkaProducer(PRODUCER_CONFIG) +# producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback) +# producerObj.flush() +# LOGGER.info("Produce to topic") +# else: +# LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code)) +# print(f"Didn't received expected response. Status code: {response.status_code}") +# return None +# time.sleep(15) +# except Exception as e: +# LOGGER.info("Failed to process response. Status code: {:}".format(e)) +# return None +# except Exception as e: +# LOGGER.info("Failed to fetch metrics. 
Status code: {:}".format(e)) +# print(f"Failed to fetch metrics: {str(e)}") +# return None +# # ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter ----------- \ No newline at end of file diff --git a/src/telemetry/backend/service/__main__.py b/src/telemetry/backend/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad86733141966894070b78b3ac227890293fa7c --- /dev/null +++ b/src/telemetry/backend/service/__main__.py @@ -0,0 +1,51 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from common.Settings import get_log_level +from .TelemetryBackendService import TelemetryBackendService + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level) + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.debug('Starting...') + + grpc_service = TelemetryBackendService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=1.0): pass + + LOGGER.debug('Terminating...') + grpc_service.stop() + + LOGGER.debug('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/telemetry/backend/tests/testTelemetryBackend.py b/src/telemetry/backend/tests/testTelemetryBackend.py deleted file mode 100644 index d832e54e77589ca677682760d19e68b1bd09b1f7..0000000000000000000000000000000000000000 --- a/src/telemetry/backend/tests/testTelemetryBackend.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
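# Illustrative usage of the entry point added above (assumes the TFS "src" tree on PYTHONPATH
# and a reachable Kafka broker): the backend starts the same way the Dockerfile ENTRYPOINT does
# and blocks until SIGINT/SIGTERM is received:
#   python -m telemetry.backend.service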
- -import sys -print (sys.path) -sys.path.append('/home/tfs/tfs-ctrl') -import threading -import logging -from typing import Tuple -# from common.proto.context_pb2 import Empty -from src.telemetry.backend.service.TelemetryBackendService import TelemetryBackendService - -LOGGER = logging.getLogger(__name__) - - -########################### -# Tests Implementation of Telemetry Backend -########################### - -def test_verify_kafka_topics(): - LOGGER.info('test_verify_kafka_topics requesting') - TelemetryBackendServiceObj = TelemetryBackendService() - KafkaTopics = ['topic_request', 'topic_response', 'topic_raw', 'topic_labled'] - response = TelemetryBackendServiceObj.create_topic_if_not_exists(KafkaTopics) - LOGGER.debug(str(response)) - assert isinstance(response, bool) - -# def test_run_kafka_listener(): -# LOGGER.info('test_receive_kafka_request requesting') -# TelemetryBackendServiceObj = TelemetryBackendService() -# response = TelemetryBackendServiceObj.run_kafka_listener() -# LOGGER.debug(str(response)) -# assert isinstance(response, bool) - -# def test_fetch_node_exporter_metrics(): -# LOGGER.info(' >>> test_fetch_node_exporter_metrics START <<< ') -# TelemetryBackendService.fetch_single_node_exporter_metric() - -def test_stream_node_export_metrics_to_raw_topic(): - LOGGER.info(' >>> test_stream_node_export_metrics_to_raw_topic START <<< ') - threading.Thread(target=TelemetryBackendService.stream_node_export_metrics_to_raw_topic, args=()).start() - diff --git a/src/telemetry/backend/tests/test_TelemetryBackend.py b/src/telemetry/backend/tests/test_TelemetryBackend.py new file mode 100644 index 0000000000000000000000000000000000000000..a2bbee540c3ce348ef52eceb0e776f48a68d94b1 --- /dev/null +++ b/src/telemetry/backend/tests/test_TelemetryBackend.py @@ -0,0 +1,38 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from common.tools.kafka.Variables import KafkaTopic +from telemetry.backend.service.TelemetryBackendService import TelemetryBackendService + + +LOGGER = logging.getLogger(__name__) + + +########################### +# Tests Implementation of Telemetry Backend +########################### + +# --- "test_validate_kafka_topics" should be run before the functionality tests --- +def test_validate_kafka_topics(): + LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") + response = KafkaTopic.create_all_topics() + assert isinstance(response, bool) + +def test_RunRequestListener(): + LOGGER.info('test_RunRequestListener') + TelemetryBackendServiceObj = TelemetryBackendService() + response = TelemetryBackendServiceObj.RunRequestListener() + LOGGER.debug(str(response)) + assert isinstance(response, bool) diff --git a/src/telemetry/database/TelemetryDBmanager.py b/src/telemetry/database/TelemetryDBmanager.py deleted file mode 100644 index b558180a9e1fbf85bf523c7faededf58f57e2264..0000000000000000000000000000000000000000 --- a/src/telemetry/database/TelemetryDBmanager.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, time -import sqlalchemy -from sqlalchemy import inspect, MetaData, Table -from sqlalchemy.orm import sessionmaker -from telemetry.database.TelemetryModel import Collector as CollectorModel -from telemetry.database.TelemetryModel import Kpi as KpiModel -from sqlalchemy.ext.declarative import declarative_base -from telemetry.database.TelemetryEngine import TelemetryEngine -from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId -from common.proto.telemetry_frontend_pb2 import Collector, CollectorId -from sqlalchemy.exc import SQLAlchemyError -from telemetry.database.TelemetryModel import Base - -LOGGER = logging.getLogger(__name__) -DB_NAME = "telemetryfrontend" - -class TelemetryDBmanager: - def __init__(self): - self.db_engine = TelemetryEngine.get_engine() - if self.db_engine is None: - LOGGER.error('Unable to get SQLAlchemy DB Engine...') - return False - self.db_name = DB_NAME - self.Session = sessionmaker(bind=self.db_engine) - - def create_database(self): - try: - # with self.db_engine.connect() as connection: - # connection.execute(f"CREATE DATABASE {self.db_name};") - TelemetryEngine.create_database(self.db_engine) - LOGGER.info('TelemetryDBmanager initalized DB Name: {:}'.format(self.db_name)) - return True - except Exception as e: # pylint: disable=bare-except # pragma: no cover - LOGGER.exception('Failed to check/create the database: {:s}'.format(str(e))) - return False - - def create_tables(self): - try: - Base.metadata.create_all(self.db_engine) # type: ignore - LOGGER.info("Tables created in database ({:}) the as per Models".format(self.db_name)) - except Exception as e: - LOGGER.info("Tables cannot be created in the TelemetryFrontend database. 
{:s}".format(str(e))) - - def verify_tables(self): - try: - with self.db_engine.connect() as connection: - result = connection.execute("SHOW TABLES;") - tables = result.fetchall() - LOGGER.info("Tables in DB: {:}".format(tables)) - except Exception as e: - LOGGER.info("Unable to fetch Table names. {:s}".format(str(e))) - - def drop_table(self, table_to_drop: str): - try: - inspector = inspect(self.db_engine) - existing_tables = inspector.get_table_names() - if table_to_drop in existing_tables: - table = Table(table_to_drop, MetaData(), autoload_with=self.db_engine) - table.drop(self.db_engine) - LOGGER.info("Tables delete in the DB Name: {:}".format(self.db_name)) - else: - LOGGER.warning("No table {:} in database {:} ".format(table_to_drop, DB_NAME)) - except Exception as e: - LOGGER.info("Tables cannot be deleted in the {:} database. {:s}".format(DB_NAME, str(e))) - - def list_databases(self): - query = "SHOW DATABASES" - with self.db_engine.connect() as connection: - result = connection.execute(query) - databases = [row[0] for row in result] - LOGGER.info("List of available DBs: {:}".format(databases)) - -# ------------------ INSERT METHODs -------------------------------------- - - def inser_kpi(self, request: KpiDescriptor): - session = self.Session() - try: - # Create a new Kpi instance - kpi_to_insert = KpiModel() - kpi_to_insert.kpi_id = request.kpi_id.kpi_id.uuid - kpi_to_insert.kpi_description = request.kpi_description - kpi_to_insert.kpi_sample_type = request.kpi_sample_type - kpi_to_insert.device_id = request.service_id.service_uuid.uuid - kpi_to_insert.endpoint_id = request.device_id.device_uuid.uuid - kpi_to_insert.service_id = request.slice_id.slice_uuid.uuid - kpi_to_insert.slice_id = request.endpoint_id.endpoint_uuid.uuid - kpi_to_insert.connection_id = request.connection_id.connection_uuid.uuid - # kpi_to_insert.link_id = request.link_id.link_id.uuid - # Add the instance to the session - session.add(kpi_to_insert) - session.commit() - LOGGER.info("Row inserted into kpi table: {:}".format(kpi_to_insert.kpi_id)) - except Exception as e: - session.rollback() - LOGGER.info("Failed to insert new kpi. {:s}".format(str(e))) - finally: - # Close the session - session.close() - - # Function to insert a row into the Collector model - def insert_collector(self, request: Collector): - session = self.Session() - try: - # Create a new Collector instance - collector_to_insert = CollectorModel() - collector_to_insert.collector_id = request.collector_id.collector_id.uuid - collector_to_insert.kpi_id = request.kpi_id.kpi_id.uuid - collector_to_insert.collector = "Test collector description" - collector_to_insert.sampling_duration_s = request.duration_s - collector_to_insert.sampling_interval_s = request.interval_s - collector_to_insert.start_timestamp = time.time() - collector_to_insert.end_timestamp = time.time() - - session.add(collector_to_insert) - session.commit() - LOGGER.info("Row inserted into collector table: {:}".format(collector_to_insert.collector_id)) - except Exception as e: - session.rollback() - LOGGER.info("Failed to insert new collector. 
{:s}".format(str(e))) - finally: - # Close the session - session.close() - -# ------------------ GET METHODs -------------------------------------- - - def get_kpi_descriptor(self, request: KpiId): - session = self.Session() - try: - kpi_id_to_search = request.kpi_id.uuid - kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_search).first() - if kpi: - LOGGER.info("kpi ID found: {:s}".format(str(kpi))) - return kpi - else: - LOGGER.warning("Kpi ID not found {:s}".format(str(kpi_id_to_search))) - return None - except Exception as e: - session.rollback() - LOGGER.info("Failed to retrieve KPI ID. {:s}".format(str(e))) - raise - finally: - session.close() - - def get_collector(self, request: CollectorId): - session = self.Session() - try: - collector_id_to_search = request.collector_id.uuid - collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_search).first() - if collector: - LOGGER.info("collector ID found: {:s}".format(str(collector))) - return collector - else: - LOGGER.warning("collector ID not found{:s}".format(str(collector_id_to_search))) - return None - except Exception as e: - session.rollback() - LOGGER.info("Failed to retrieve collector ID. {:s}".format(str(e))) - raise - finally: - session.close() - - # ------------------ SELECT METHODs -------------------------------------- - - def select_kpi_descriptor(self, **filters): - session = self.Session() - try: - query = session.query(KpiModel) - for column, value in filters.items(): - query = query.filter(getattr(KpiModel, column) == value) - result = query.all() - if len(result) != 0: - LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result))) - else: - LOGGER.warning("No matching row found : {:s}".format(str(result))) - return result - except SQLAlchemyError as e: - LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e)) - return [] - finally: - session.close() - - def select_collector(self, **filters): - session = self.Session() - try: - query = session.query(CollectorModel) - for column, value in filters.items(): - query = query.filter(getattr(CollectorModel, column) == value) - result = query.all() - if len(result) != 0: - LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result))) - else: - LOGGER.warning("No matching row found : {:s}".format(str(result))) - return result - except SQLAlchemyError as e: - LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e)) - return [] - finally: - session.close() - -# ------------------ DELETE METHODs -------------------------------------- - - def delete_kpi_descriptor(self, request: KpiId): - session = self.Session() - try: - kpi_id_to_delete = request.kpi_id.uuid - kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_delete).first() - if kpi: - session.delete(kpi) - session.commit() - LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id_to_delete) - else: - LOGGER.warning("KPI with kpi_id %s not found", kpi_id_to_delete) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id_to_delete, e) - finally: - session.close() - - def delete_collector(self, request: CollectorId): - session = self.Session() - try: - collector_id_to_delete = request.collector_id.uuid - collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_delete).first() - if collector: - session.delete(collector) - session.commit() - LOGGER.info("Deleted 
collector with collector_id: %s", collector_id_to_delete) - else: - LOGGER.warning("collector with collector_id %s not found", collector_id_to_delete) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error deleting collector with collector_id %s: %s", collector_id_to_delete, e) - finally: - session.close() \ No newline at end of file diff --git a/src/telemetry/database/TelemetryEngine.py b/src/telemetry/database/TelemetryEngine.py index a563fa09f94c812aed07d0aa3cbd5bc988737fc4..18ec2ddbc671302b642db04b673038659da7acde 100644 --- a/src/telemetry/database/TelemetryEngine.py +++ b/src/telemetry/database/TelemetryEngine.py @@ -12,48 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, sqlalchemy, sqlalchemy_utils -# from common.Settings import get_setting +import logging, sqlalchemy +from common.Settings import get_setting LOGGER = logging.getLogger(__name__) -APP_NAME = 'tfs' -ECHO = False # False: No dump SQL commands and transactions executed -CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}' -# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' +# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}' +CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' class TelemetryEngine: - # def __init__(self): - # self.engine = self.get_engine() @staticmethod def get_engine() -> sqlalchemy.engine.Engine: - CRDB_NAMESPACE = "crdb" - CRDB_SQL_PORT = "26257" - CRDB_DATABASE = "telemetryfrontend" - CRDB_USERNAME = "tfs" - CRDB_PASSWORD = "tfs123" - CRDB_SSLMODE = "require" - crdb_uri = CRDB_URI_TEMPLATE.format( - CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) - # crdb_uri = CRDB_URI_TEMPLATE.format( - # CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) + crdb_uri = get_setting('CRDB_URI', default=None) + if crdb_uri is None: + CRDB_NAMESPACE = "crdb" + CRDB_SQL_PORT = "26257" + CRDB_DATABASE = "tfs-telemetry" + CRDB_USERNAME = "tfs" + CRDB_PASSWORD = "tfs123" + CRDB_SSLMODE = "require" + crdb_uri = CRDB_URI_TEMPLATE.format( + CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) try: - # engine = sqlalchemy.create_engine( - # crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True) engine = sqlalchemy.create_engine(crdb_uri, echo=False) - LOGGER.info(' TelemetryDBmanager initalized with DB URL: {:}'.format(crdb_uri)) + LOGGER.info(' TelemetryDB initalized with DB URL: {:}'.format(crdb_uri)) except: # pylint: disable=bare-except # pragma: no cover LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) return None # type: ignore return engine # type: ignore - - @staticmethod - def create_database(engine : sqlalchemy.engine.Engine) -> None: - if not sqlalchemy_utils.database_exists(engine.url): - LOGGER.info("Database created. 
{:}".format(engine.url)) - sqlalchemy_utils.create_database(engine.url) - - @staticmethod - def drop_database(engine : sqlalchemy.engine.Engine) -> None: - if sqlalchemy_utils.database_exists(engine.url): - sqlalchemy_utils.drop_database(engine.url) diff --git a/src/telemetry/database/TelemetryModel.py b/src/telemetry/database/TelemetryModel.py index be4f0969c86638520cf226b8e42db90426165804..4e71ce8138af39e51c80791dbd6683d855231d7b 100644 --- a/src/telemetry/database/TelemetryModel.py +++ b/src/telemetry/database/TelemetryModel.py @@ -14,32 +14,60 @@ import logging from sqlalchemy.dialects.postgresql import UUID -from sqlalchemy import Column, Integer, String, Float, Text, ForeignKey -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker, relationship +from sqlalchemy import Column, String, Float from sqlalchemy.orm import registry +from common.proto import telemetry_frontend_pb2 logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) # Create a base class for declarative models Base = registry().generate_base() -# Base = declarative_base() class Collector(Base): __tablename__ = 'collector' collector_id = Column(UUID(as_uuid=False), primary_key=True) - kpi_id = Column(UUID(as_uuid=False)) - collector_decription = Column(String) - sampling_duration_s = Column(Float) - sampling_interval_s = Column(Float) - start_timestamp = Column(Float) - end_timestamp = Column(Float) - + kpi_id = Column(UUID(as_uuid=False), nullable=False) + sampling_duration_s = Column(Float , nullable=False) + sampling_interval_s = Column(Float , nullable=False) + start_timestamp = Column(Float , nullable=False) + end_timestamp = Column(Float , nullable=False) + # helps in logging the information def __repr__(self): - return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', " - f"collector='{self.collector_decription}', sampling_duration_s='{self.sampling_duration_s}', " - f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', " - f"end_timestamp='{self.end_timestamp}')>") \ No newline at end of file + return (f"<Collector(collector_id='{self.collector_id}' , kpi_id='{self.kpi_id}', " + f"sampling_duration_s='{self.sampling_duration_s}', sampling_interval_s='{self.sampling_interval_s}'," + f"start_timestamp='{self.start_timestamp}' , end_timestamp='{self.end_timestamp}')>") + + @classmethod + def ConvertCollectorToRow(cls, request): + """ + Create an instance of Collector table rows from a request object. + Args: request: The request object containing collector gRPC message. + Returns: A row (an instance of Collector table) initialized with content of the request. + """ + return cls( + collector_id = request.collector_id.collector_id.uuid, + kpi_id = request.kpi_id.kpi_id.uuid, + sampling_duration_s = request.duration_s, + sampling_interval_s = request.interval_s, + start_timestamp = request.start_time.timestamp, + end_timestamp = request.end_time.timestamp + ) + + @classmethod + def ConvertRowToCollector(cls, row): + """ + Create and return a dictionary representation of a Collector table instance. + Args: row: The Collector table instance (row) containing the data. + Returns: collector gRPC message initialized with the content of a row. 
+ """ + response = telemetry_frontend_pb2.Collector() + response.collector_id.collector_id.uuid = row.collector_id + response.kpi_id.kpi_id.uuid = row.kpi_id + response.duration_s = row.sampling_duration_s + response.interval_s = row.sampling_interval_s + response.start_time.timestamp = row.start_timestamp + response.end_time.timestamp = row.end_timestamp + return response diff --git a/src/telemetry/database/Telemetry_DB.py b/src/telemetry/database/Telemetry_DB.py new file mode 100644 index 0000000000000000000000000000000000000000..32acfd73a410a7bfddd6b487d0b1962afadb3842 --- /dev/null +++ b/src/telemetry/database/Telemetry_DB.py @@ -0,0 +1,137 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import sqlalchemy_utils +from sqlalchemy import inspect +from sqlalchemy.orm import sessionmaker +from telemetry.database.TelemetryModel import Collector as CollectorModel +from telemetry.database.TelemetryEngine import TelemetryEngine +from common.method_wrappers.ServiceExceptions import ( + OperationFailedException, AlreadyExistsException ) + +LOGGER = logging.getLogger(__name__) +DB_NAME = "tfs_telemetry" + +class TelemetryDB: + def __init__(self): + self.db_engine = TelemetryEngine.get_engine() + if self.db_engine is None: + LOGGER.error('Unable to get SQLAlchemy DB Engine...') + return False + self.db_name = DB_NAME + self.Session = sessionmaker(bind=self.db_engine) + + def create_database(self): + if not sqlalchemy_utils.database_exists(self.db_engine.url): + LOGGER.debug("Database created. {:}".format(self.db_engine.url)) + sqlalchemy_utils.create_database(self.db_engine.url) + + def drop_database(self) -> None: + if sqlalchemy_utils.database_exists(self.db_engine.url): + sqlalchemy_utils.drop_database(self.db_engine.url) + + def create_tables(self): + try: + CollectorModel.metadata.create_all(self.db_engine) # type: ignore + LOGGER.debug("Tables created in the database: {:}".format(self.db_name)) + except Exception as e: + LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e))) + raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)]) + + def verify_tables(self): + try: + inspect_object = inspect(self.db_engine) + if(inspect_object.has_table('collector', None)): + LOGGER.info("Table exists in DB: {:}".format(self.db_name)) + except Exception as e: + LOGGER.info("Unable to fetch Table names. {:s}".format(str(e))) + +# ----------------- CURD METHODs --------------------- + + def add_row_to_db(self, row): + session = self.Session() + try: + session.add(row) + session.commit() + LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.") + return True + except Exception as e: + session.rollback() + if "psycopg2.errors.UniqueViolation" in str(e): + LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. 
{str(e)}") + raise AlreadyExistsException(row.__class__.__name__, row, + extra_details=["Unique key voilation: {:}".format(e)] ) + else: + LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def search_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + entity = session.query(model).filter_by(**{col_name: id_to_search}).first() + if entity: + # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}") + return entity + else: + LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}") + print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search)) + return None + except Exception as e: + session.rollback() + LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}") + raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)]) + finally: + session.close() + + def delete_db_row_by_id(self, model, col_name, id_to_search): + session = self.Session() + try: + record = session.query(model).filter_by(**{col_name: id_to_search}).first() + if record: + session.delete(record) + session.commit() + LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search) + else: + LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search) + return None + except Exception as e: + session.rollback() + LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e) + raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)]) + finally: + session.close() + + def select_with_filter(self, model, filter_object): + session = self.Session() + try: + query = session.query(CollectorModel) + # Apply filters based on the filter_object + if filter_object.kpi_id: + query = query.filter(CollectorModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id])) + result = query.all() + # query should be added to return all rows + if result: + LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") # - Results: {result} + else: + LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}") + return result + except Exception as e: + LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}") + raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)]) + finally: + session.close() + diff --git a/src/telemetry/database/managementDB.py b/src/telemetry/database/managementDB.py deleted file mode 100644 index f79126f279d7bbece6c08ae5eb1cd74e340d1c7d..0000000000000000000000000000000000000000 --- a/src/telemetry/database/managementDB.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, time -import sqlalchemy -import sqlalchemy_utils -from sqlalchemy.orm import sessionmaker -from sqlalchemy.ext.declarative import declarative_base -from telemetry.database.TelemetryEngine import TelemetryEngine -from telemetry.database.TelemetryModel import Base - -LOGGER = logging.getLogger(__name__) -DB_NAME = "telemetryfrontend" - -# # Create a base class for declarative models -# Base = declarative_base() - -class managementDB: - def __init__(self): - self.db_engine = TelemetryEngine.get_engine() - if self.db_engine is None: - LOGGER.error('Unable to get SQLAlchemy DB Engine...') - return False - self.db_name = DB_NAME - self.Session = sessionmaker(bind=self.db_engine) - - @staticmethod - def create_database(engine : sqlalchemy.engine.Engine) -> None: - if not sqlalchemy_utils.database_exists(engine.url): - LOGGER.info("Database created. {:}".format(engine.url)) - sqlalchemy_utils.create_database(engine.url) - - @staticmethod - def drop_database(engine : sqlalchemy.engine.Engine) -> None: - if sqlalchemy_utils.database_exists(engine.url): - sqlalchemy_utils.drop_database(engine.url) - - # def create_database(self): - # try: - # with self.db_engine.connect() as connection: - # connection.execute(f"CREATE DATABASE {self.db_name};") - # LOGGER.info('managementDB initalizes database. Name: {self.db_name}') - # return True - # except: - # LOGGER.exception('Failed to check/create the database: {:s}'.format(str(self.db_engine.url))) - # return False - - @staticmethod - def create_tables(engine : sqlalchemy.engine.Engine): - try: - Base.metadata.create_all(engine) # type: ignore - LOGGER.info("Tables created in the DB Name: {:}".format(DB_NAME)) - except Exception as e: - LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e))) - - def verify_tables(self): - try: - with self.db_engine.connect() as connection: - result = connection.execute("SHOW TABLES;") - tables = result.fetchall() # type: ignore - LOGGER.info("Tables verified: {:}".format(tables)) - except Exception as e: - LOGGER.info("Unable to fetch Table names. {:s}".format(str(e))) - - @staticmethod - def add_row_to_db(self, row): - session = self.Session() - try: - session.add(row) - session.commit() - LOGGER.info(f"Row inserted into {row.__class__.__name__} table.") - except Exception as e: - session.rollback() - LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}") - finally: - session.close() - - def search_db_row_by_id(self, model, col_name, id_to_search): - session = self.Session() - try: - entity = session.query(model).filter_by(**{col_name: id_to_search}).first() - if entity: - LOGGER.info(f"{model.__name__} ID found: {str(entity)}") - return entity - else: - LOGGER.warning(f"{model.__name__} ID not found: {str(id_to_search)}") - return None - except Exception as e: - session.rollback() - LOGGER.info(f"Failed to retrieve {model.__name__} ID. 
{str(e)}") - raise - finally: - session.close() - - def delete_db_row_by_id(self, model, col_name, id_to_search): - session = self.Session() - try: - record = session.query(model).filter_by(**{col_name: id_to_search}).first() - if record: - session.delete(record) - session.commit() - LOGGER.info("Deleted %s with %s: %s", model.__name__, col_name, id_to_search) - else: - LOGGER.warning("%s with %s %s not found", model.__name__, col_name, id_to_search) - except Exception as e: - session.rollback() - LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e) - finally: - session.close() - - def select_with_filter(self, model, **filters): - session = self.Session() - try: - query = session.query(model) - for column, value in filters.items(): - query = query.filter(getattr(model, column) == value) # type: ignore - result = query.all() - if result: - LOGGER.info(f"Fetched filtered rows from {model.__name__} table with filters: {filters}") # - Results: {result} - else: - LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filters}") - return result - except Exception as e: - LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filters} ::: {e}") - return [] - finally: - session.close() \ No newline at end of file diff --git a/src/telemetry/database/tests/managementDBtests.py b/src/telemetry/database/tests/managementDBtests.py deleted file mode 100644 index 24138abe42be742bd9b16d7840343f9d7c7fe133..0000000000000000000000000000000000000000 --- a/src/telemetry/database/tests/managementDBtests.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from telemetry.database.managementDB import managementDB -from telemetry.database.tests.messages import create_collector_model_object - - -def test_add_row_to_db(): - managementDBobj = managementDB() - managementDBobj.add_row_to_db(create_collector_model_object()) \ No newline at end of file diff --git a/src/telemetry/database/tests/telemetryDBtests.py b/src/telemetry/database/tests/telemetryDBtests.py deleted file mode 100644 index 0d221106419d6e4ee4b313adf10c90c5e6be7666..0000000000000000000000000000000000000000 --- a/src/telemetry/database/tests/telemetryDBtests.py +++ /dev/null @@ -1,86 +0,0 @@ - -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -from typing import Any -from sqlalchemy.ext.declarative import declarative_base -from telemetry.database.TelemetryDBmanager import TelemetryDBmanager -from telemetry.database.TelemetryEngine import TelemetryEngine -from telemetry.database.tests import temp_DB -from .messages import create_kpi_request, create_collector_request, \ - create_kpi_id_request, create_kpi_filter_request, \ - create_collector_id_request, create_collector_filter_request - -logging.basicConfig(level=logging.INFO) -LOGGER = logging.getLogger(__name__) - - -# def test_temp_DB(): -# temp_DB.main() - -def test_telemetry_object_creation(): - LOGGER.info('--- test_telemetry_object_creation: START') - - LOGGER.info('>>> Creating TelemetryDBmanager Object <<< ') - TelemetryDBmanagerObj = TelemetryDBmanager() - TelemetryEngine.create_database(TelemetryDBmanagerObj.db_engine) # creates 'frontend' db, if it doesnot exists. - - LOGGER.info('>>> Creating database <<< ') - TelemetryDBmanagerObj.create_database() - - LOGGER.info('>>> verifing database <<< ') - TelemetryDBmanagerObj.list_databases() - - # # LOGGER.info('>>> Droping Tables: ') - # # TelemetryDBmanagerObj.drop_table("table_naem_here") - - LOGGER.info('>>> Creating Tables <<< ') - TelemetryDBmanagerObj.create_tables() - - LOGGER.info('>>> Verifing Table creation <<< ') - TelemetryDBmanagerObj.verify_tables() - - # LOGGER.info('>>> TESTING: Row Insertion Operation: kpi Table <<<') - # kpi_obj = create_kpi_request() - # TelemetryDBmanagerObj.inser_kpi(kpi_obj) - - # LOGGER.info('>>> TESTING: Row Insertion Operation: collector Table <<<') - # collector_obj = create_collector_request() - # TelemetryDBmanagerObj.insert_collector(collector_obj) - - # LOGGER.info('>>> TESTING: Get KpiDescriptor <<<') - # kpi_id_obj = create_kpi_id_request() - # TelemetryDBmanagerObj.get_kpi_descriptor(kpi_id_obj) - - # LOGGER.info('>>> TESTING: Select Collector <<<') - # collector_id_obj = create_collector_id_request() - # TelemetryDBmanagerObj.get_collector(collector_id_obj) - - # LOGGER.info('>>> TESTING: Applying kpi filter <<< ') - # kpi_filter : dict[str, Any] = create_kpi_filter_request() - # TelemetryDBmanagerObj.select_kpi_descriptor(**kpi_filter) - - # LOGGER.info('>>> TESTING: Applying collector filter <<<') - # collector_filter : dict[str, Any] = create_collector_filter_request() - # TelemetryDBmanagerObj.select_collector(**collector_filter) - - # LOGGER.info('>>> TESTING: Delete KpiDescriptor ') - # kpi_id_obj = create_kpi_id_request() - # TelemetryDBmanagerObj.delete_kpi_descriptor(kpi_id_obj) - - # LOGGER.info('>>> TESTING: Delete Collector ') - # collector_id_obj = create_collector_id_request() - # TelemetryDBmanagerObj.delete_collector(collector_id_obj) - \ No newline at end of file diff --git a/src/telemetry/database/tests/temp_DB.py b/src/telemetry/database/tests/temp_DB.py deleted file mode 100644 index 089d3542492c2da87b839416f7118749bb82caad..0000000000000000000000000000000000000000 --- a/src/telemetry/database/tests/temp_DB.py +++ /dev/null @@ -1,327 +0,0 @@ -from sqlalchemy import create_engine, Column, String, Integer, Text, Float, ForeignKey -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker, relationship -from sqlalchemy.dialects.postgresql import UUID -import logging - -LOGGER = logging.getLogger(__name__) -Base = declarative_base() - -class Kpi(Base): - __tablename__ = 'kpi' - - kpi_id = Column(UUID(as_uuid=False), primary_key=True) - kpi_description = Column(Text) - kpi_sample_type = Column(Integer) 
- device_id = Column(String) - endpoint_id = Column(String) - service_id = Column(String) - slice_id = Column(String) - connection_id = Column(String) - link_id = Column(String) - - collectors = relationship('Collector', back_populates='kpi') - - def __repr__(self): - return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', " - f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', " - f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', " - f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', " - f"link_id='{self.link_id}')>") - -class Collector(Base): - __tablename__ = 'collector' - - collector_id = Column(UUID(as_uuid=False), primary_key=True) - kpi_id = Column(UUID(as_uuid=False), ForeignKey('kpi.kpi_id')) - collector = Column(String) - sampling_duration_s = Column(Float) - sampling_interval_s = Column(Float) - start_timestamp = Column(Float) - end_timestamp = Column(Float) - - kpi = relationship('Kpi', back_populates='collectors') - - def __repr__(self): - return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', " - f"collector='{self.collector}', sampling_duration_s='{self.sampling_duration_s}', " - f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', " - f"end_timestamp='{self.end_timestamp}')>") - -class DatabaseManager: - def __init__(self, db_url, db_name): - self.engine = create_engine(db_url) - self.db_name = db_name - self.Session = sessionmaker(bind=self.engine) - LOGGER.info("DatabaseManager initialized with DB URL: %s and DB Name: %s", db_url, db_name) - - def create_database(self): - try: - with self.engine.connect() as connection: - connection.execute(f"CREATE DATABASE {self.db_name};") - LOGGER.info("Database '%s' created successfully.", self.db_name) - except Exception as e: - LOGGER.error("Error creating database '%s': %s", self.db_name, e) - finally: - LOGGER.info("create_database method execution finished.") - - def create_tables(self): - try: - Base.metadata.create_all(self.engine) - LOGGER.info("Tables created successfully.") - except Exception as e: - LOGGER.error("Error creating tables: %s", e) - finally: - LOGGER.info("create_tables method execution finished.") - - def verify_table_creation(self): - try: - with self.engine.connect() as connection: - result = connection.execute("SHOW TABLES;") - tables = result.fetchall() - LOGGER.info("Tables verified: %s", tables) - return tables - except Exception as e: - LOGGER.error("Error verifying table creation: %s", e) - return [] - finally: - LOGGER.info("verify_table_creation method execution finished.") - - def insert_row_kpi(self, kpi_data): - session = self.Session() - try: - new_kpi = Kpi(**kpi_data) - session.add(new_kpi) - session.commit() - LOGGER.info("Inserted row into KPI table: %s", kpi_data) - except Exception as e: - session.rollback() - LOGGER.error("Error inserting row into KPI table: %s", e) - finally: - session.close() - LOGGER.info("insert_row_kpi method execution finished.") - - def insert_row_collector(self, collector_data): - session = self.Session() - try: - new_collector = Collector(**collector_data) - session.add(new_collector) - session.commit() - LOGGER.info("Inserted row into Collector table: %s", collector_data) - except Exception as e: - session.rollback() - LOGGER.error("Error inserting row into Collector table: %s", e) - finally: - session.close() - LOGGER.info("insert_row_collector method execution finished.") - - def verify_insertion_kpi(self, 
kpi_id): - session = self.Session() - try: - kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first() - LOGGER.info("Verified insertion in KPI table for kpi_id: %s, Result: %s", kpi_id, kpi) - return kpi - except Exception as e: - LOGGER.error("Error verifying insertion in KPI table for kpi_id %s: %s", kpi_id, e) - return None - finally: - session.close() - LOGGER.info("verify_insertion_kpi method execution finished.") - - def verify_insertion_collector(self, collector_id): - session = self.Session() - try: - collector = session.query(Collector).filter_by(collector_id=collector_id).first() - LOGGER.info("Verified insertion in Collector table for collector_id: %s, Result: %s", collector_id, collector) - return collector - except Exception as e: - LOGGER.error("Error verifying insertion in Collector table for collector_id %s: %s", collector_id, e) - return None - finally: - session.close() - LOGGER.info("verify_insertion_collector method execution finished.") - - def get_all_kpi_rows(self): - session = self.Session() - try: - kpi_rows = session.query(Kpi).all() - LOGGER.info("Fetched all rows from KPI table: %s", kpi_rows) - return kpi_rows - except Exception as e: - LOGGER.error("Error fetching all rows from KPI table: %s", e) - return [] - finally: - session.close() - LOGGER.info("get_all_kpi_rows method execution finished.") - - def get_all_collector_rows(self): - session = self.Session() - try: - collector_rows = session.query(Collector).all() - LOGGER.info("Fetched all rows from Collector table: %s", collector_rows) - return collector_rows - except Exception as e: - LOGGER.error("Error fetching all rows from Collector table: %s", e) - return [] - finally: - session.close() - LOGGER.info("get_all_collector_rows method execution finished.") - - def get_filtered_kpi_rows(self, **filters): - session = self.Session() - try: - query = session.query(Kpi) - for column, value in filters.items(): - query = query.filter(getattr(Kpi, column) == value) - result = query.all() - LOGGER.info("Fetched filtered rows from KPI table with filters ---------- : {:s}".format(str(result))) - return result - except NoResultFound: - LOGGER.warning("No results found in KPI table with filters %s", filters) - return [] - except Exception as e: - LOGGER.error("Error fetching filtered rows from KPI table with filters %s: %s", filters, e) - return [] - finally: - session.close() - LOGGER.info("get_filtered_kpi_rows method execution finished.") - - def get_filtered_collector_rows(self, **filters): - session = self.Session() - try: - query = session.query(Collector) - for column, value in filters.items(): - query = query.filter(getattr(Collector, column) == value) - result = query.all() - LOGGER.info("Fetched filtered rows from Collector table with filters %s: %s", filters, result) - return result - except NoResultFound: - LOGGER.warning("No results found in Collector table with filters %s", filters) - return [] - except Exception as e: - LOGGER.error("Error fetching filtered rows from Collector table with filters %s: %s", filters, e) - return [] - finally: - session.close() - LOGGER.info("get_filtered_collector_rows method execution finished.") - - def delete_kpi_by_id(self, kpi_id): - session = self.Session() - try: - kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first() - if kpi: - session.delete(kpi) - session.commit() - LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id) - else: - LOGGER.warning("KPI with kpi_id %s not found", kpi_id) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error 
deleting KPI with kpi_id %s: %s", kpi_id, e) - finally: - session.close() - LOGGER.info("delete_kpi_by_id method execution finished.") - - def delete_collector_by_id(self, collector_id): - session = self.Session() - try: - collector = session.query(Collector).filter_by(collector_id=collector_id).first() - if collector: - session.delete(collector) - session.commit() - LOGGER.info("Deleted Collector with collector_id: %s", collector_id) - else: - LOGGER.warning("Collector with collector_id %s not found", collector_id) - except SQLAlchemyError as e: - session.rollback() - LOGGER.error("Error deleting Collector with collector_id %s: %s", collector_id, e) - finally: - session.close() - LOGGER.info("delete_collector_by_id method execution finished.") - - -# Example Usage -def main(): - CRDB_SQL_PORT = "26257" - CRDB_DATABASE = "telemetryfrontend" - CRDB_USERNAME = "tfs" - CRDB_PASSWORD = "tfs123" - CRDB_SSLMODE = "require" - CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}' - crdb_uri = CRDB_URI_TEMPLATE.format( - CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) - # db_url = "cockroachdb://username:password@localhost:26257/" - # db_name = "yourdatabase" - db_manager = DatabaseManager(crdb_uri, CRDB_DATABASE) - - # Create database - db_manager.create_database() - - # Update db_url to include the new database name - db_manager.engine = create_engine(f"{crdb_uri}") - db_manager.Session = sessionmaker(bind=db_manager.engine) - - # Create tables - db_manager.create_tables() - - # Verify table creation - tables = db_manager.verify_table_creation() - LOGGER.info('Tables in the database: {:s}'.format(str(tables))) - - # Insert a row into the KPI table - kpi_data = { - 'kpi_id': '123e4567-e89b-12d3-a456-426614174100', - 'kpi_description': 'Sample KPI', - 'kpi_sample_type': 1, - 'device_id': 'device_1', - 'endpoint_id': 'endpoint_1', - 'service_id': 'service_1', - 'slice_id': 'slice_1', - 'connection_id': 'conn_1', - 'link_id': 'link_1' - } - db_manager.insert_row_kpi(kpi_data) - - # Insert a row into the Collector table - collector_data = { - 'collector_id': '123e4567-e89b-12d3-a456-426614174101', - 'kpi_id': '123e4567-e89b-12d3-a456-426614174000', - 'collector': 'Collector 1', - 'sampling_duration_s': 60.0, - 'sampling_interval_s': 10.0, - 'start_timestamp': 1625247600.0, - 'end_timestamp': 1625247660.0 - } - db_manager.insert_row_collector(collector_data) - - # Verify insertion into KPI table - kpi = db_manager.verify_insertion_kpi('123e4567-e89b-12d3-a456-426614174000') - print("Inserted KPI:", kpi) - - # Verify insertion into Collector table - collector = db_manager.verify_insertion_collector('123e4567-e89b-12d3-a456-426614174001') - print("Inserted Collector:", collector) - - # Get all rows from KPI table - all_kpi_rows = db_manager.get_all_kpi_rows() - LOGGER.info("All KPI Rows: %s", all_kpi_rows) - - # Get all rows from Collector table - all_collector_rows = db_manager.get_all_collector_rows() - LOGGER.info("All Collector Rows: %s", all_collector_rows) - - # Get filtered rows from KPI table - filtered_kpi_rows = db_manager.get_filtered_kpi_rows(kpi_description='Sample KPI') - LOGGER.info("Filtered KPI Rows: %s", filtered_kpi_rows) - - # Get filtered rows from Collector table - filtered_collector_rows = db_manager.get_filtered_collector_rows(collector='Collector 1') - LOGGER.info("Filtered Collector Rows: %s", filtered_collector_rows) - - # Delete a KPI by kpi_id - kpi_id_to_delete = '123e4567-e89b-12d3-a456-426614174000' - 
db_manager.delete_kpi_by_id(kpi_id_to_delete) - - # Delete a Collector by collector_id - collector_id_to_delete = '123e4567-e89b-12d3-a456-426614174001' - db_manager.delete_collector_by_id(collector_id_to_delete) diff --git a/src/telemetry/frontend/Dockerfile b/src/telemetry/frontend/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7125d31fe74f7c44a52c2783369c2dc7a4a31160 --- /dev/null +++ b/src/telemetry/frontend/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/telemetry/frontend +WORKDIR /var/teraflow/telemetry/frontend +COPY src/telemetry/frontend/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/telemetry/__init__.py telemetry/__init__.py +COPY src/telemetry/frontend/. telemetry/frontend/ +COPY src/telemetry/database/. 
telemetry/database/ + +# Start the service +ENTRYPOINT ["python", "-m", "telemetry.frontend.service"] diff --git a/src/telemetry/database/tests/__init__.py b/src/telemetry/frontend/requirements.in similarity index 79% rename from src/telemetry/database/tests/__init__.py rename to src/telemetry/frontend/requirements.in index 839e45e3b646bc60de7edd81fcfb91b7b38feadf..231dc04e820387c95ffea72cbe67b9f0a9a0865a 100644 --- a/src/telemetry/database/tests/__init__.py +++ b/src/telemetry/frontend/requirements.in @@ -10,4 +10,10 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file +# limitations under the License. + +confluent-kafka==2.3.* +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* diff --git a/src/telemetry/frontend/service/TelemetryFrontendService.py b/src/telemetry/frontend/service/TelemetryFrontendService.py index dc3f8df363a882db0f0ba3112a38f3bba3921c30..abd361aa0082e2de1d1f5fa7e81a336f3091af9a 100644 --- a/src/telemetry/frontend/service/TelemetryFrontendService.py +++ b/src/telemetry/frontend/service/TelemetryFrontendService.py @@ -14,17 +14,16 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc -from monitoring.service.NameMapping import NameMapping from common.tools.service.GenericGrpcService import GenericGrpcService from common.proto.telemetry_frontend_pb2_grpc import add_TelemetryFrontendServiceServicer_to_server from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl class TelemetryFrontendService(GenericGrpcService): - def __init__(self, name_mapping : NameMapping, cls_name: str = __name__) -> None: + def __init__(self, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND) super().__init__(port, cls_name=cls_name) - self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl(name_mapping) + self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl() def install_servicers(self): add_TelemetryFrontendServiceServicer_to_server(self.telemetry_frontend_servicer, self.server) diff --git a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py index e6830ad676d3934c88b01575ebdd1d0549fb00d1..2b872dba33bbe1434b68d5b5d2449e0b228312f7 100644 --- a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py +++ b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py @@ -12,126 +12,160 @@ # See the License for the specific language governing permissions and # limitations under the License. 
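Before the servicer changes below, a note on the packages pinned in requirements.in above: SQLAlchemy 1.4 together with the sqlalchemy-cockroachdb dialect and psycopg2-binary is what the frontend uses to reach CockroachDB. The following is a minimal, illustrative sketch of how such an engine and session are typically opened; it is not part of the patch. The URI format follows the one used by the removed temp_DB.py helper, and the credentials, port and database name are placeholders, not values taken from this patch.

import sqlalchemy
from sqlalchemy.orm import sessionmaker

# Placeholder connection details; the real values come from the component's settings.
CRDB_URI = 'cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_telemetry?sslmode=require'

engine  = sqlalchemy.create_engine(CRDB_URI, echo=False)   # requires sqlalchemy-cockroachdb
Session = sessionmaker(bind=engine)

with Session() as session:                                  # SQLAlchemy 1.4 style session
    session.execute(sqlalchemy.text('SELECT 1'))            # simple connectivity check only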
-import ast +import json import threading -import time -from typing import Tuple, Any +from typing import Any, Dict import grpc import logging -from confluent_kafka import Consumer as KafkaConsumer +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic from common.proto.context_pb2 import Empty -from monitoring.service.NameMapping import NameMapping -from confluent_kafka import Producer as KafkaProducer -from confluent_kafka import KafkaException -from confluent_kafka import KafkaError from common.proto.telemetry_frontend_pb2 import CollectorId, Collector, CollectorFilter, CollectorList -from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceServicer from telemetry.database.TelemetryModel import Collector as CollectorModel -from telemetry.database.managementDB import managementDB +from telemetry.database.Telemetry_DB import TelemetryDB + +from confluent_kafka import Consumer as KafkaConsumer +from confluent_kafka import Producer as KafkaProducer +from confluent_kafka import KafkaError + LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Monitoring', 'TelemetryFrontend') -KAFKA_SERVER_IP = '127.0.0.1:9092' -ACTIVE_COLLECTORS = [] -KAFKA_TOPICS = {'request' : 'topic_request', - 'response': 'topic_response'} +METRICS_POOL = MetricsPool('TelemetryFrontend', 'NBIgRPC') +ACTIVE_COLLECTORS = [] # keep and can be populated from DB class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer): - def __init__(self, name_mapping : NameMapping): + def __init__(self): LOGGER.info('Init TelemetryFrontendService') - self.managementDBobj = managementDB() - self.kafka_producer = KafkaProducer({'bootstrap.servers': KAFKA_SERVER_IP,}) - self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KAFKA_SERVER_IP, - 'group.id' : 'frontend', - 'auto.offset.reset' : 'latest'}) - - def add_collector_to_db(self, request: Collector ): # type: ignore - try: - # Create a new Collector instance - collector_to_insert = CollectorModel() - collector_to_insert.collector_id = request.collector_id.collector_id.uuid - collector_to_insert.kpi_id = request.kpi_id.kpi_id.uuid - # collector_to_insert.collector_decription= request.collector - collector_to_insert.sampling_duration_s = request.duration_s - collector_to_insert.sampling_interval_s = request.interval_s - collector_to_insert.start_timestamp = time.time() - collector_to_insert.end_timestamp = time.time() - managementDB.add_row_to_db(collector_to_insert) - except Exception as e: - LOGGER.info("Unable to create collectorModel class object. 
{:}".format(e)) + self.tele_db_obj = TelemetryDB() + self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()}) + self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : 'frontend', + 'auto.offset.reset' : 'latest'}) - # @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def StartCollector(self, request : Collector, grpc_context: grpc.ServicerContext # type: ignore ) -> CollectorId: # type: ignore - # push info to frontend db LOGGER.info ("gRPC message: {:}".format(request)) response = CollectorId() - _collector_id = str(request.collector_id.collector_id.uuid) - _collector_kpi_id = str(request.kpi_id.kpi_id.uuid) - _collector_duration = int(request.duration_s) - _collector_interval = int(request.interval_s) - # pushing Collector to DB - self.add_collector_to_db(request) - self.publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval) - # self.run_publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval) - response.collector_id.uuid = request.collector_id.collector_id.uuid # type: ignore + + # TODO: Verify the presence of Kpi ID in KpiDB or assume that KPI ID already exists? + self.tele_db_obj.add_row_to_db( + CollectorModel.ConvertCollectorToRow(request) + ) + self.PublishStartRequestOnKafka(request) + + response.collector_id.uuid = request.collector_id.collector_id.uuid return response - - def run_publish_to_kafka_request_topic(self, msg_key: str, kpi: str, duration : int, interval: int): - # Add threading.Thread() response to dictonary and call start() in the next statement - threading.Thread(target=self.publish_to_kafka_request_topic, args=(msg_key, kpi, duration, interval)).start() - - def publish_to_kafka_request_topic(self, - collector_id: str, kpi: str, duration : int, interval: int - ): + + def PublishStartRequestOnKafka(self, collector_obj): """ - Method to generate collector request to Kafka topic. + Method to generate collector request on Kafka. 
""" - # time.sleep(5) - # producer_configs = { - # 'bootstrap.servers': KAFKA_SERVER_IP, - # } - # topic_request = "topic_request" - msg_value : Tuple [str, int, int] = (kpi, duration, interval) - # print ("Request generated: ", "Colletcor Id: ", collector_id, \ - # ", \nKPI: ", kpi, ", Duration: ", duration, ", Interval: ", interval) - # producerObj = KafkaProducer(producer_configs) - self.kafka_producer.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback) - # producerObj.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback) - LOGGER.info("Collector Request Generated: {:}, {:}, {:}, {:}".format(collector_id, kpi, duration, interval)) - # producerObj.produce(topic_request, key=collector_id, value= str(msg_value), callback=self.delivery_callback) - ACTIVE_COLLECTORS.append(collector_id) + collector_uuid = collector_obj.collector_id.collector_id.uuid + collector_to_generate : Dict = { + "kpi_id" : collector_obj.kpi_id.kpi_id.uuid, + "duration": collector_obj.duration_s, + "interval": collector_obj.interval_s + } + self.kafka_producer.produce( + KafkaTopic.REQUEST.value, + key = collector_uuid, + value = json.dumps(collector_to_generate), + callback = self.delivery_callback + ) + LOGGER.info("Collector Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_generate)) + ACTIVE_COLLECTORS.append(collector_uuid) self.kafka_producer.flush() - def run_kafka_listener(self): - # print ("--- STARTED: run_kafka_listener ---") - threading.Thread(target=self.kafka_listener).start() + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def StopCollector(self, + request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore + ) -> Empty: # type: ignore + LOGGER.info ("gRPC message: {:}".format(request)) + self.PublishStopRequestOnKafka(request) + return Empty() + + def PublishStopRequestOnKafka(self, collector_id): + """ + Method to generate stop collector request on Kafka. + """ + collector_uuid = collector_id.collector_id.uuid + collector_to_stop : Dict = { + "kpi_id" : collector_uuid, + "duration": -1, + "interval": -1 + } + self.kafka_producer.produce( + KafkaTopic.REQUEST.value, + key = collector_uuid, + value = json.dumps(collector_to_stop), + callback = self.delivery_callback + ) + LOGGER.info("Collector Stop Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_stop)) + try: + ACTIVE_COLLECTORS.remove(collector_uuid) + except ValueError: + LOGGER.warning('Collector ID {:} not found in active collector list'.format(collector_uuid)) + self.kafka_producer.flush() + + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def SelectCollectors(self, + request : CollectorFilter, contextgrpc_context: grpc.ServicerContext # type: ignore + ) -> CollectorList: # type: ignore + LOGGER.info("gRPC message: {:}".format(request)) + response = CollectorList() + + try: + rows = self.tele_db_obj.select_with_filter(CollectorModel, request) + except Exception as e: + LOGGER.info('Unable to apply filter on kpi descriptor. {:}'.format(e)) + try: + for row in rows: + collector_obj = CollectorModel.ConvertRowToCollector(row) + response.collector_list.append(collector_obj) + return response + except Exception as e: + LOGGER.info('Unable to process filter response {:}'.format(e)) + + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def delivery_callback(self, err, msg): + """ + Callback function to handle message delivery status. 
+ Args: + err (KafkaError): Kafka error object. + msg (Message): Kafka message object. + """ + if err: + LOGGER.debug('Message delivery failed: {:}'.format(err)) + print('Message delivery failed: {:}'.format(err)) + # else: + # LOGGER.debug('Message delivered to topic {:}'.format(msg.topic())) + # print('Message delivered to topic {:}'.format(msg.topic())) + + # ---------- Independent Method --------------- + # Listener method is independent of any method (same lifetime as service) + # continously listens for responses + def RunResponseListener(self): + threading.Thread(target=self.ResponseListener).start() return True - def kafka_listener(self): + def ResponseListener(self): """ listener for response on Kafka topic. """ - # # print ("--- STARTED: kafka_listener ---") - # conusmer_configs = { - # 'bootstrap.servers' : KAFKA_SERVER_IP, - # 'group.id' : 'frontend', - # 'auto.offset.reset' : 'latest' - # } - # # topic_response = "topic_response" - - # consumerObj = KafkaConsumer(conusmer_configs) - self.kafka_consumer.subscribe([KAFKA_TOPICS['response']]) - # print (time.time()) + self.kafka_consumer.subscribe([KafkaTopic.RESPONSE.value]) while True: receive_msg = self.kafka_consumer.poll(2.0) if receive_msg is None: - # print (" - Telemetry frontend listening on Kafka Topic: ", KAFKA_TOPICS['response']) # added for debugging purposes continue elif receive_msg.error(): if receive_msg.error().code() == KafkaError._PARTITION_EOF: @@ -142,63 +176,16 @@ class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer): try: collector_id = receive_msg.key().decode('utf-8') if collector_id in ACTIVE_COLLECTORS: - (kpi_id, kpi_value) = ast.literal_eval(receive_msg.value().decode('utf-8')) - self.process_response(collector_id, kpi_id, kpi_value) + kpi_value = json.loads(receive_msg.value().decode('utf-8')) + self.process_response(collector_id, kpi_value['kpi_id'], kpi_value['kpi_value']) else: print(f"collector id does not match.\nRespone ID: '{collector_id}' --- Active IDs: '{ACTIVE_COLLECTORS}' ") except Exception as e: - print(f"No message key found: {str(e)}") + print(f"Error extarcting msg key or value: {str(e)}") continue - # return None def process_response(self, collector_id: str, kpi_id: str, kpi_value: Any): if kpi_id == "-1" and kpi_value == -1: - # LOGGER.info("Sucessfully terminated Collector: {:}".format(collector_id)) - print ("Sucessfully terminated Collector: ", collector_id) - else: - print ("Frontend-Received values Collector Id:", collector_id, "-KPI:", kpi_id, "-VALUE:", kpi_value) - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def delivery_callback(self, err, msg): - """ - Callback function to handle message delivery status. - Args: - err (KafkaError): Kafka error object. - msg (Message): Kafka message object. 
- """ - if err: - print(f'Message delivery failed: {err}') + print ("Backend termination confirmation for collector id: ", collector_id) else: - print(f'Message delivered to topic {msg.topic()}') - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def StopCollector(self, - request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore - ) -> Empty: # type: ignore - LOGGER.info ("gRPC message: {:}".format(request)) - _collector_id = request.collector_id.uuid - self.publish_to_kafka_request_topic(_collector_id, "", -1, -1) - return Empty() - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def SelectCollectors(self, - request : CollectorFilter, contextgrpc_context: grpc.ServicerContext # type: ignore - ) -> CollectorList: # type: ignore - LOGGER.info("gRPC message: {:}".format(request)) - response = CollectorList() - filter_to_apply = dict() - filter_to_apply['kpi_id'] = request.kpi_id[0].kpi_id.uuid - # filter_to_apply['duration_s'] = request.duration_s[0] - try: - rows = self.managementDBobj.select_with_filter(CollectorModel, **filter_to_apply) - except Exception as e: - LOGGER.info('Unable to apply filter on kpi descriptor. {:}'.format(e)) - try: - if len(rows) != 0: - for row in rows: - collector_obj = Collector() - collector_obj.collector_id.collector_id.uuid = row.collector_id - response.collector_list.append(collector_obj) - return response - except Exception as e: - LOGGER.info('Unable to process response {:}'.format(e)) \ No newline at end of file + print ("KPI Value: Collector Id:", collector_id, ", Kpi Id:", kpi_id, ", Value:", kpi_value) diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py index 3b0263706c3dad3756306d1ba8a3a104d568cd6f..2a6c5dbcf2da6b6a074c2b8ee23791bc4896442f 100644 --- a/src/telemetry/frontend/service/__main__.py +++ b/src/telemetry/frontend/service/__main__.py @@ -12,16 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
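Before moving on to the __main__ changes below, it is worth pinning down the Kafka message convention the servicer above settles on: every message is keyed by the collector UUID; a start request carries kpi_id, duration and interval; a stop request reuses the same topic with duration and interval set to -1 (and, mirroring PublishStopRequestOnKafka, places the collector UUID in the kpi_id field); backend responses carry kpi_id and kpi_value, with the pair "-1"/-1 acting as the termination acknowledgement. The sketch below illustrates that convention with a plain confluent-kafka producer; the broker address and the literal topic name are assumptions, since the patch resolves them through KafkaConfig and KafkaTopic, and the UUIDs are only examples.

import json
from confluent_kafka import Producer

BOOTSTRAP     = '127.0.0.1:9092'   # assumption; the service resolves this via KafkaConfig.get_kafka_address()
REQUEST_TOPIC = 'topic_request'    # assumption; the service resolves this via KafkaTopic.REQUEST.value

producer = Producer({'bootstrap.servers': BOOTSTRAP})

collector_uuid = '5d45f53f-d567-429f-9427-9196ac72ff0c'   # illustrative collector UUID

# Start request: KPI to collect plus sampling duration/interval in seconds.
start_msg = {'kpi_id': 'a7237fa3-caf4-479d-84b6-4d9f9738fb7f', 'duration': 10.0, 'interval': 2.0}
producer.produce(REQUEST_TOPIC, key=collector_uuid, value=json.dumps(start_msg))

# Stop request: same topic and key, duration/interval set to -1 as a sentinel.
stop_msg = {'kpi_id': collector_uuid, 'duration': -1, 'interval': -1}
producer.produce(REQUEST_TOPIC, key=collector_uuid, value=json.dumps(stop_msg))

producer.flush()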
-import signal -import sys -import logging, threading +import logging, signal, sys, threading from prometheus_client import start_http_server -from monitoring.service.NameMapping import NameMapping +from common.Settings import get_log_level, get_metrics_port from .TelemetryFrontendService import TelemetryFrontendService -from monitoring.service.EventTools import EventsDeviceCollector -from common.Settings import ( - get_log_level, wait_for_environment_variables, get_env_var_name, - get_metrics_port ) terminate = threading.Event() LOGGER = None @@ -31,20 +25,12 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name terminate.set() def main(): - global LOGGER + global LOGGER # pylint: disable=global-statement log_level = get_log_level() logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) -# ------- will be added later -------------- - # wait_for_environment_variables([ - # get_env_var_name - - - # ]) -# ------- will be added later -------------- - signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) @@ -54,9 +40,7 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) - name_mapping = NameMapping() - - grpc_service = TelemetryFrontendService(name_mapping) + grpc_service = TelemetryFrontendService() grpc_service.start() # Wait for Ctrl+C or termination signal @@ -69,4 +53,4 @@ def main(): return 0 if __name__ == '__main__': - sys.exit(main()) \ No newline at end of file + sys.exit(main()) diff --git a/src/telemetry/frontend/tests/Messages.py b/src/telemetry/frontend/tests/Messages.py index 1205898d13a610cd262979242e4f489f5e35cdb8..a0e93e8a121b9efaac83f7169419911c8ee6e3ea 100644 --- a/src/telemetry/frontend/tests/Messages.py +++ b/src/telemetry/frontend/tests/Messages.py @@ -16,68 +16,27 @@ import uuid import random from common.proto import telemetry_frontend_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType - +from common.proto.kpi_manager_pb2 import KpiId # ----------------------- "2nd" Iteration -------------------------------- def create_collector_id(): _collector_id = telemetry_frontend_pb2.CollectorId() - _collector_id.collector_id.uuid = uuid.uuid4() + # _collector_id.collector_id.uuid = str(uuid.uuid4()) + _collector_id.collector_id.uuid = "5d45f53f-d567-429f-9427-9196ac72ff0c" return _collector_id -# def create_collector_id_a(coll_id_str : str): -# _collector_id = telemetry_frontend_pb2.CollectorId() -# _collector_id.collector_id.uuid = str(coll_id_str) -# return _collector_id - def create_collector_request(): _create_collector_request = telemetry_frontend_pb2.Collector() _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4()) - _create_collector_request.kpi_id.kpi_id.uuid = "165d20c5-a446-42fa-812f-e2b7ed283c6f" - # _create_collector_request.collector = "collector description" + _create_collector_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) _create_collector_request.duration_s = float(random.randint(8, 16)) _create_collector_request.interval_s = float(random.randint(2, 4)) return _create_collector_request def create_collector_filter(): _create_collector_filter = telemetry_frontend_pb2.CollectorFilter() - new_kpi_id = _create_collector_filter.kpi_id.add() - new_kpi_id.kpi_id.uuid = "165d20c5-a446-42fa-812f-e2b7ed283c6f" + kpi_id_obj = KpiId() + # kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) + kpi_id_obj.kpi_id.uuid = "a7237fa3-caf4-479d-84b6-4d9f9738fb7f" + 
_create_collector_filter.kpi_id.append(kpi_id_obj) return _create_collector_filter - -# ----------------------- "First" Iteration -------------------------------- -# def create_collector_request_a(): -# _create_collector_request_a = telemetry_frontend_pb2.Collector() -# _create_collector_request_a.collector_id.collector_id.uuid = "-1" -# return _create_collector_request_a - -# def create_collector_request_b(str_kpi_id, coll_duration_s, coll_interval_s -# ) -> telemetry_frontend_pb2.Collector: -# _create_collector_request_b = telemetry_frontend_pb2.Collector() -# _create_collector_request_b.collector_id.collector_id.uuid = '1' -# _create_collector_request_b.kpi_id.kpi_id.uuid = str_kpi_id -# _create_collector_request_b.duration_s = coll_duration_s -# _create_collector_request_b.interval_s = coll_interval_s -# return _create_collector_request_b - -# def create_collector_filter(): -# _create_collector_filter = telemetry_frontend_pb2.CollectorFilter() -# new_collector_id = _create_collector_filter.collector_id.add() -# new_collector_id.collector_id.uuid = "COLL1" -# new_kpi_id = _create_collector_filter.kpi_id.add() -# new_kpi_id.kpi_id.uuid = "KPI1" -# new_device_id = _create_collector_filter.device_id.add() -# new_device_id.device_uuid.uuid = 'DEV1' -# new_service_id = _create_collector_filter.service_id.add() -# new_service_id.service_uuid.uuid = 'SERV1' -# new_slice_id = _create_collector_filter.slice_id.add() -# new_slice_id.slice_uuid.uuid = 'SLC1' -# new_endpoint_id = _create_collector_filter.endpoint_id.add() -# new_endpoint_id.endpoint_uuid.uuid = 'END1' -# new_connection_id = _create_collector_filter.connection_id.add() -# new_connection_id.connection_uuid.uuid = 'CON1' -# _create_collector_filter.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED) -# return _create_collector_filter - -# def create_collector_list(): -# _create_collector_list = telemetry_frontend_pb2.CollectorList() -# return _create_collector_list \ No newline at end of file diff --git a/src/telemetry/frontend/tests/test_frontend.py b/src/telemetry/frontend/tests/test_frontend.py index 002cc430721845aa5aa18274375e2c22b5d77ff7..9c3f9d3a8f545792eb2bb3a371c6c20664d24f69 100644 --- a/src/telemetry/frontend/tests/test_frontend.py +++ b/src/telemetry/frontend/tests/test_frontend.py @@ -13,129 +13,40 @@ # limitations under the License. 
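The reworked test_frontend.py below drives the gRPC API directly, while the backend is not part of the test. Because ResponseListener only forwards messages whose key matches an entry in ACTIVE_COLLECTORS, an integration test can emulate the backend by publishing a response for a known collector id, as sketched here. The broker address and topic name are again assumptions (the service resolves them via KafkaConfig/KafkaTopic), and the payload shape follows process_response above: {"kpi_id": ..., "kpi_value": ...}, with "-1"/-1 signalling termination.

import json
from confluent_kafka import Producer

def emulate_backend_response(collector_id: str, kpi_id: str, kpi_value,
                             bootstrap: str = '127.0.0.1:9092',        # assumption
                             response_topic: str = 'topic_response'):  # assumption
    """Publish a response message the frontend's ResponseListener can pick up."""
    producer = Producer({'bootstrap.servers': bootstrap})
    payload  = {'kpi_id': kpi_id, 'kpi_value': kpi_value}
    producer.produce(response_topic, key=collector_id, value=json.dumps(payload))
    producer.flush()

# A normal measurement for an active collector (UUIDs are illustrative) ...
emulate_backend_response('5d45f53f-d567-429f-9427-9196ac72ff0c',
                         'a7237fa3-caf4-479d-84b6-4d9f9738fb7f', 42.0)
# ... and the termination acknowledgement expected by process_response().
emulate_backend_response('5d45f53f-d567-429f-9427-9196ac72ff0c', '-1', -1)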
import os -import time import pytest import logging -from typing import Union -from common.proto.context_pb2 import Empty from common.Constants import ServiceNameEnum from common.proto.telemetry_frontend_pb2 import CollectorId, CollectorList -from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server -from context.client.ContextClient import ContextClient -from common.tools.service.GenericGrpcService import GenericGrpcService -from common.tests.MockServicerImpl_Context import MockServicerImpl_Context +from common.proto.context_pb2 import Empty +from common.tools.kafka.Variables import KafkaTopic from common.Settings import ( get_service_port_grpc, get_env_var_name, ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC) from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient from telemetry.frontend.service.TelemetryFrontendService import TelemetryFrontendService +from telemetry.frontend.tests.Messages import ( + create_collector_request, create_collector_id, create_collector_filter) from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl -from telemetry.frontend.tests.Messages import ( create_collector_request, create_collector_filter) -from telemetry.database.managementDB import managementDB -from telemetry.database.TelemetryEngine import TelemetryEngine - -from device.client.DeviceClient import DeviceClient -from device.service.DeviceService import DeviceService -from device.service.driver_api.DriverFactory import DriverFactory -from device.service.driver_api.DriverInstanceCache import DriverInstanceCache -from monitoring.service.NameMapping import NameMapping - -os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' -from device.service.drivers import DRIVERS ########################### # Tests Setup ########################### LOCAL_HOST = '127.0.0.1' -MOCKSERVICE_PORT = 10000 -TELEMETRY_FRONTEND_PORT = str(MOCKSERVICE_PORT) + str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)) +TELEMETRY_FRONTEND_PORT = str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)) os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(TELEMETRY_FRONTEND_PORT) LOGGER = logging.getLogger(__name__) -class MockContextService(GenericGrpcService): - # Mock Service implementing Context to simplify unitary tests of Monitoring - - def __init__(self, bind_port: Union[str, int]) -> None: - super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') - - # pylint: disable=attribute-defined-outside-init - def install_servicers(self): - self.context_servicer = MockServicerImpl_Context() - add_ContextServiceServicer_to_server(self.context_servicer, self.server) - @pytest.fixture(scope='session') -def context_service(): - LOGGER.info('Initializing MockContextService...') - _service = MockContextService(MOCKSERVICE_PORT) - _service.start() - - LOGGER.info('Yielding MockContextService...') - yield _service - - LOGGER.info('Terminating MockContextService...') - _service.context_servicer.msg_broker.terminate() - _service.stop() - - LOGGER.info('Terminated MockContextService...') - -@pytest.fixture(scope='session') -def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument - LOGGER.info('Initializing ContextClient...') - _client = ContextClient() - - LOGGER.info('Yielding 
ContextClient...') - yield _client - - LOGGER.info('Closing ContextClient...') - _client.close() - - LOGGER.info('Closed ContextClient...') - -@pytest.fixture(scope='session') -def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument - LOGGER.info('Initializing DeviceService...') - driver_factory = DriverFactory(DRIVERS) - driver_instance_cache = DriverInstanceCache(driver_factory) - _service = DeviceService(driver_instance_cache) - _service.start() - - # yield the server, when test finishes, execution will resume to stop it - LOGGER.info('Yielding DeviceService...') - yield _service - - LOGGER.info('Terminating DeviceService...') - _service.stop() - - LOGGER.info('Terminated DeviceService...') - -@pytest.fixture(scope='session') -def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument - LOGGER.info('Initializing DeviceClient...') - _client = DeviceClient() - - LOGGER.info('Yielding DeviceClient...') - yield _client - - LOGGER.info('Closing DeviceClient...') - _client.close() - - LOGGER.info('Closed DeviceClient...') - -@pytest.fixture(scope='session') -def telemetryFrontend_service( - context_service : MockContextService, - device_service : DeviceService - ): +def telemetryFrontend_service(): LOGGER.info('Initializing TelemetryFrontendService...') - name_mapping = NameMapping() - _service = TelemetryFrontendService(name_mapping) + _service = TelemetryFrontendService() _service.start() # yield the server, when test finishes, execution will resume to stop it @@ -168,37 +79,73 @@ def telemetryFrontend_client( # Tests Implementation of Telemetry Frontend ########################### -def test_verify_db_and_table(): - LOGGER.info(' >>> test_verify_database_and_tables START: <<< ') - _engine = TelemetryEngine.get_engine() - managementDB.create_database(_engine) - managementDB.create_tables(_engine) +# ------- Re-structuring Test --------- +# --- "test_validate_kafka_topics" should be run before the functionality tests --- +def test_validate_kafka_topics(): + LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") + response = KafkaTopic.create_all_topics() + assert isinstance(response, bool) +# ----- core funtionality test ----- def test_StartCollector(telemetryFrontend_client): LOGGER.info(' >>> test_StartCollector START: <<< ') response = telemetryFrontend_client.StartCollector(create_collector_request()) LOGGER.debug(str(response)) assert isinstance(response, CollectorId) -def test_run_kafka_listener(): - LOGGER.info(' >>> test_run_kafka_listener START: <<< ') - name_mapping = NameMapping() - TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping) - response = TelemetryFrontendServiceObj.run_kafka_listener() # Method "run_kafka_listener" is not define in frontend.proto - LOGGER.debug(str(response)) - assert isinstance(response, bool) - def test_StopCollector(telemetryFrontend_client): LOGGER.info(' >>> test_StopCollector START: <<< ') - _collector_id = telemetryFrontend_client.StartCollector(create_collector_request()) - time.sleep(3) # wait for small amount before call the stopCollecter() - response = telemetryFrontend_client.StopCollector(_collector_id) + response = telemetryFrontend_client.StopCollector(create_collector_id()) LOGGER.debug(str(response)) assert isinstance(response, Empty) -def test_select_collectors(telemetryFrontend_client): - LOGGER.info(' >>> test_select_collector requesting <<< ') +def test_SelectCollectors(telemetryFrontend_client): + 
LOGGER.info(' >>> test_SelectCollectors START: <<< ') response = telemetryFrontend_client.SelectCollectors(create_collector_filter()) - LOGGER.info('Received Rows after applying Filter: {:} '.format(response)) LOGGER.debug(str(response)) - assert isinstance(response, CollectorList) \ No newline at end of file + assert isinstance(response, CollectorList) + +# ----- Non-gRPC method tests ----- +def test_RunResponseListener(): + LOGGER.info(' >>> test_RunResponseListener START: <<< ') + TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl() + response = TelemetryFrontendServiceObj.RunResponseListener() # becasue Method "run_kafka_listener" is not define in frontend.proto + LOGGER.debug(str(response)) + assert isinstance(response, bool) + +# ------- previous test ---------------- + +# def test_verify_db_and_table(): +# LOGGER.info(' >>> test_verify_database_and_tables START: <<< ') +# _engine = TelemetryEngine.get_engine() +# managementDB.create_database(_engine) +# managementDB.create_tables(_engine) + +# def test_StartCollector(telemetryFrontend_client): +# LOGGER.info(' >>> test_StartCollector START: <<< ') +# response = telemetryFrontend_client.StartCollector(create_collector_request()) +# LOGGER.debug(str(response)) +# assert isinstance(response, CollectorId) + +# def test_run_kafka_listener(): +# LOGGER.info(' >>> test_run_kafka_listener START: <<< ') +# name_mapping = NameMapping() +# TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping) +# response = TelemetryFrontendServiceObj.run_kafka_listener() # Method "run_kafka_listener" is not define in frontend.proto +# LOGGER.debug(str(response)) +# assert isinstance(response, bool) + +# def test_StopCollector(telemetryFrontend_client): +# LOGGER.info(' >>> test_StopCollector START: <<< ') +# _collector_id = telemetryFrontend_client.StartCollector(create_collector_request()) +# time.sleep(3) # wait for small amount before call the stopCollecter() +# response = telemetryFrontend_client.StopCollector(_collector_id) +# LOGGER.debug(str(response)) +# assert isinstance(response, Empty) + +# def test_select_collectors(telemetryFrontend_client): +# LOGGER.info(' >>> test_select_collector requesting <<< ') +# response = telemetryFrontend_client.SelectCollectors(create_collector_filter()) +# LOGGER.info('Received Rows after applying Filter: {:} '.format(response)) +# LOGGER.debug(str(response)) +# assert isinstance(response, CollectorList) \ No newline at end of file diff --git a/src/telemetry/telemetry_virenv.txt b/src/telemetry/telemetry_virenv.txt deleted file mode 100644 index e39f80b6593d6c41411751cdd0ea59ee05344570..0000000000000000000000000000000000000000 --- a/src/telemetry/telemetry_virenv.txt +++ /dev/null @@ -1,49 +0,0 @@ -anytree==2.8.0 -APScheduler==3.10.1 -attrs==23.2.0 -certifi==2024.2.2 -charset-normalizer==2.0.12 -colorama==0.4.6 -confluent-kafka==2.3.0 -coverage==6.3 -future-fstrings==1.2.0 -greenlet==3.0.3 -grpcio==1.47.5 -grpcio-health-checking==1.47.5 -grpcio-tools==1.47.5 -grpclib==0.4.4 -h2==4.1.0 -hpack==4.0.0 -hyperframe==6.0.1 -idna==3.7 -influx-line-protocol==0.1.4 -iniconfig==2.0.0 -kafka-python==2.0.2 -multidict==6.0.5 -networkx==3.3 -packaging==24.0 -pluggy==1.5.0 -prettytable==3.5.0 -prometheus-client==0.13.0 -protobuf==3.20.3 -psycopg2-binary==2.9.3 -py==1.11.0 -py-cpuinfo==9.0.0 -pytest==6.2.5 -pytest-benchmark==3.4.1 -pytest-depends==1.0.1 -python-dateutil==2.8.2 -python-json-logger==2.0.2 -pytz==2024.1 -questdb==1.0.1 -requests==2.27.1 -six==1.16.0 -SQLAlchemy==1.4.52 
-sqlalchemy-cockroachdb==1.4.4 -SQLAlchemy-Utils==0.38.3 -toml==0.10.2 -typing_extensions==4.12.0 -tzlocal==5.2 -urllib3==1.26.18 -wcwidth==0.2.13 -xmltodict==0.12.0 diff --git a/src/telemetry/database/tests/messages.py b/src/telemetry/tests/messages.py similarity index 100% rename from src/telemetry/database/tests/messages.py rename to src/telemetry/tests/messages.py diff --git a/scripts/run_tests_locally-telemetry-mgtDB.sh b/src/telemetry/tests/test_telemetryDB.py old mode 100755 new mode 100644 similarity index 59% rename from scripts/run_tests_locally-telemetry-mgtDB.sh rename to src/telemetry/tests/test_telemetryDB.py index 8b68104eaf343b57ec4953334cda37167cca3529..c4976f8c2144fcdcad43a3e25d43091010de0d18 --- a/scripts/run_tests_locally-telemetry-mgtDB.sh +++ b/src/telemetry/tests/test_telemetryDB.py @@ -1,4 +1,3 @@ -#!/bin/bash # Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,13 +13,16 @@ # limitations under the License. -PROJECTDIR=`pwd` +import logging +from telemetry.database.Telemetry_DB import TelemetryDB -cd $PROJECTDIR/src -# RCFILE=$PROJECTDIR/coverage/.coveragerc -# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ -# kpi_manager/tests/test_unitary.py +LOGGER = logging.getLogger(__name__) -RCFILE=$PROJECTDIR/coverage/.coveragerc -python3 -m pytest --log-cli-level=INFO --verbose \ - telemetry/database/tests/managementDBtests.py +def test_verify_databases_and_tables(): + LOGGER.info('>>> test_verify_databases_and_tables : START <<< ') + TelemetryDBobj = TelemetryDB() + TelemetryDBobj.drop_database() + TelemetryDBobj.verify_tables() + TelemetryDBobj.create_database() + TelemetryDBobj.create_tables() + TelemetryDBobj.verify_tables() \ No newline at end of file
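Taken together, the reworked tests assume the following local flow: the Kafka topics exist, the frontend gRPC service is running, and the client drives StartCollector, StopCollector and SelectCollectors against it. The condensed driver below is only an illustration of that flow; it mirrors the pytest fixtures in test_frontend.py rather than replacing them, and it assumes that TelemetryFrontendClient picks up the host/port environment variables set at the top of that file and exposes a close() method like the other TFS clients.

import logging

from common.tools.kafka.Variables import KafkaTopic
from telemetry.frontend.service.TelemetryFrontendService import TelemetryFrontendService
from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
from telemetry.frontend.tests.Messages import (
    create_collector_request, create_collector_id, create_collector_filter)

logging.basicConfig(level=logging.INFO)

def run_local_smoke_test():
    # Topics must exist before the frontend produces start/stop requests.
    assert KafkaTopic.create_all_topics()

    service = TelemetryFrontendService()
    service.start()
    client = TelemetryFrontendClient()   # assumption: host/port taken from environment variables
    try:
        collector_id = client.StartCollector(create_collector_request())
        logging.info('Started collector: %s', collector_id)

        client.StopCollector(create_collector_id())
        logging.info('Stop request sent')

        collectors = client.SelectCollectors(create_collector_filter())
        logging.info('Selected collectors: %s', collectors)
    finally:
        client.close()   # assumption: client exposes close(), as the other TFS clients do
        service.stop()

if __name__ == '__main__':
    run_local_smoke_test()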