diff --git a/deploy/all.sh b/deploy/all.sh
index c6da23366d3cd74b63fde87c3f24960c3bc2999b..484640c67b34ad0253738b996902ef75a887dbc8 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -18,6 +18,9 @@
 # Read deployment settings
 ########################################################################################################################

+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
 # If not already set, set the URL of the Docker registry where the images will be uploaded to.
 # By default, assume internal MicroK8s registry is used.
 export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
@@ -42,6 +45,9 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
 export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}

+
+# ----- CockroachDB ------------------------------------------------------------
+
 # If not already set, set the namespace where CockroackDB will be deployed.
 export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}

@@ -84,6 +90,9 @@ export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""}
 # If CRDB_REDEPLOY is "YES", the database will be dropped while checking/deploying CockroachDB.
 export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""}

+
+# ----- NATS -------------------------------------------------------------------
+
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}

@@ -99,6 +108,32 @@ export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
 export NATS_REDEPLOY=${NATS_REDEPLOY:-""}

+# ----- QuestDB ----------------------------------------------------------------
+
+# If not already set, set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
+
+# If not already set, set the database username to be used by Monitoring.
+export QDB_USERNAME=${QDB_USERNAME:-"admin"}
+
+# If not already set, set the database user's password to be used by Monitoring.
+export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
+
+# If not already set, set the table name to be used by Monitoring.
+export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+
+## If not already set, disable flag for dropping table if exists.
+## WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION!
+## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while
+## checking/deploying QuestDB.
+#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""}
+
+# If not already set, disable flag for re-deploying QuestDB from scratch.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
+# If QDB_REDEPLOY is "YES", the database will be dropped while checking/deploying QuestDB.
+export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -109,7 +144,10 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
 # Deploy NATS
 ./deploy/nats.sh

-# Deploy TFS
+# Deploy QuestDB
+./deploy/qdb.sh
+
+# Deploy TeraFlowSDN
 ./deploy/tfs.sh

 # Show deploy summary
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index 598980ac84a3b49e55402993e22ed963e80b2e38..90456773b615e9914f601133cb7da949811e06ff 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -44,7 +44,7 @@ export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}

 # If not already set, disable flag for dropping database if exists.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
-# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
+# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while
 # checking/deploying CockroachDB.
 export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""}
diff --git a/deploy/qdb.sh b/deploy/qdb.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1e9d4d8ee264032a6e0073223e1363f7bf3d7910
--- /dev/null
+++ b/deploy/qdb.sh
@@ -0,0 +1,165 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
+
+# If not already set, set the database username to be used by Monitoring.
+export QDB_USERNAME=${QDB_USERNAME:-"admin"}
+
+# If not already set, set the database user's password to be used by Monitoring.
+export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
+
+# If not already set, set the table name to be used by Monitoring.
+export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+
+## If not already set, disable flag for dropping table if exists.
+## WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION!
+## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while
+## checking/deploying QuestDB.
+#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""}
+
+# If not already set, disable flag for re-deploying QuestDB from scratch.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
+# If QDB_REDEPLOY is "YES", the database will be dropped while checking/deploying QuestDB.
+export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+TMP_FOLDER="./tmp"
+QDB_MANIFESTS_PATH="manifests/questdb"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+QDB_LOG_FILE="$TMP_LOGS_FOLDER/qdb_deploy.log"
+mkdir -p $TMP_LOGS_FOLDER
+
+function qdb_deploy() {
+    echo "QuestDB Namespace"
+    echo ">>> Create QuestDB Namespace (if missing)"
+    kubectl create namespace ${QDB_NAMESPACE}
+    echo
+
+    echo "QuestDB"
+    echo ">>> Checking if QuestDB is deployed..."
+    if kubectl get --namespace ${QDB_NAMESPACE} statefulset/questdb &> /dev/null; then
+        echo ">>> QuestDB is present; skipping step."
+    else
+        echo ">>> Deploy QuestDB"
+        cp "${QDB_MANIFESTS_PATH}/manifest.yaml" "${TMP_MANIFESTS_FOLDER}/qdb_manifest.yaml"
+        kubectl apply --namespace ${QDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/qdb_manifest.yaml"
+
+        echo ">>> Waiting for QuestDB statefulset to be created..."
+        while ! kubectl get --namespace ${QDB_NAMESPACE} statefulset/questdb &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+
+        # Wait for statefulset condition "Available=True" does not work
+        # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
+        #   "error: readyReplicas is not found"
+        # Workaround: Check the pods are ready
+        #echo ">>> QuestDB statefulset created. Waiting for readiness condition..."
+        #kubectl wait --namespace ${QDB_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/questdb
+        #kubectl wait --namespace ${QDB_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
+        #    statefulset/questdb
+        echo ">>> QuestDB statefulset created. Waiting for QuestDB pods to be created..."
+        while ! kubectl get --namespace ${QDB_NAMESPACE} pod/questdb-0 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        kubectl wait --namespace ${QDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/questdb-0
+    fi
+    echo
+
+    echo "QuestDB Port Mapping"
+    echo ">>> Expose QuestDB SQL port (8812->8812)"
+    QDB_SQL_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+    PATCH='{"data": {"'${QDB_SQL_PORT}'": "'${QDB_NAMESPACE}'/questdb-public:'${QDB_SQL_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${QDB_SQL_PORT}', "hostPort": '${QDB_SQL_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo ">>> Expose QuestDB Influx Line Protocol port (9009->9009)"
+    QDB_ILP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="ilp")].port}')
+    PATCH='{"data": {"'${QDB_ILP_PORT}'": "'${QDB_NAMESPACE}'/questdb-public:'${QDB_ILP_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${QDB_ILP_PORT}', "hostPort": '${QDB_ILP_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo ">>> Expose QuestDB HTTP Mgmt GUI port (9000->9000)"
+    QDB_GUI_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+    PATCH='{"data": {"'${QDB_GUI_PORT}'": "'${QDB_NAMESPACE}'/questdb-public:'${QDB_GUI_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${QDB_GUI_PORT}', "hostPort": '${QDB_GUI_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+function qdb_undeploy() {
+    echo "QuestDB"
+    echo ">>> Checking if QuestDB is deployed..."
+    if kubectl get --namespace ${QDB_NAMESPACE} statefulset/questdb &> /dev/null; then
+        echo ">>> Undeploy QuestDB"
+        kubectl delete --namespace ${QDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/qdb_manifest.yaml" --ignore-not-found
+    else
+        echo ">>> QuestDB is not present; skipping step."
+    fi
+    echo
+
+    echo "QuestDB Namespace"
+    echo ">>> Delete QuestDB Namespace (if exists)"
+    echo "NOTE: this step might take a few minutes to complete!"
+    kubectl delete namespace ${QDB_NAMESPACE} --ignore-not-found
+    echo
+}
+
+# TODO: implement method to drop table
+#function qdb_drop_table() {
+#    echo "Drop table if exists"
+#    QDB_CLIENT_URL="postgresql://${QDB_USERNAME}:${QDB_PASSWORD}@questdb-0:${QDB_SQL_PORT}/defaultdb?sslmode=require"
+#    kubectl exec -it --namespace ${QDB_NAMESPACE} questdb-0 -- \
+#        ./qdb sql --certs-dir=/qdb/qdb-certs --url=${QDB_CLIENT_URL} \
+#        --execute "DROP TABLE IF EXISTS ${QDB_TABLE};"
+#    echo
+#}
+
+if [ "$QDB_REDEPLOY" == "YES" ]; then
+    qdb_undeploy
+#elif [ "$QDB_DROP_TABLE_IF_EXISTS" == "YES" ]; then
+#    qdb_drop_table
+fi
+qdb_deploy
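Reviewer note: once qdb.sh completes, the SQL port (8812) is reachable on the host through the MicroK8s ingress TCP mappings patched above. A minimal smoke test of the exposed endpoint, assuming the default credentials above and psycopg2 available (the Monitoring component already depends on it); 'qdb' is QuestDB's default database name:

```python
import psycopg2

# Connect to QuestDB over its PostgreSQL wire protocol, as exposed by deploy/qdb.sh.
# user/password mirror the QDB_USERNAME/QDB_PASSWORD defaults.
connection = psycopg2.connect(
    host='127.0.0.1', port=8812, user='admin', password='quest', dbname='qdb')
with connection.cursor() as cursor:
    cursor.execute('SHOW TABLES;')  # QuestDB supports SHOW TABLES as a SQL extension
    print(cursor.fetchall())
connection.close()
```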
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 86043ee44829904786e700df813400476ca4e755..c0f376ee983a970e4c0fdb1aa8d5c18544874a99 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -57,6 +57,18 @@ export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}

+# If not already set, set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
+
+# If not already set, set the database username to be used by Monitoring.
+export QDB_USERNAME=${QDB_USERNAME:-"admin"}
+
+# If not already set, set the database user's password to be used by Monitoring.
+export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
+
+# If not already set, set the table name to be used by Monitoring.
+export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+

 ########################################################################################################################
 # Automated steps start here
@@ -95,6 +107,22 @@ kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
     --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT}
 printf "\n"

+echo "Create secret with QuestDB data"
+QDB_HTTP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+QDB_ILP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="ilp")].port}')
+QDB_SQL_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+METRICSDB_HOSTNAME="questdb-public.${QDB_NAMESPACE}.svc.cluster.local"
+kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=QDB_NAMESPACE=${QDB_NAMESPACE} \
+    --from-literal=METRICSDB_HOSTNAME=${METRICSDB_HOSTNAME} \
+    --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \
+    --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \
+    --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \
+    --from-literal=METRICSDB_TABLE=${QDB_TABLE} \
+    --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \
+    --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD}
+printf "\n"
+
 echo "Deploying components and collecting environment variables..."
 ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
 echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
@@ -251,17 +279,6 @@ for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
 done
 printf "\n"

-# By now, leave these controls here. Some component dependencies are not well handled.
-
-if [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
-    echo "Waiting for 'MonitoringDB' component..."
-    # Kubernetes does not implement --for='condition=available' for statefulsets.
-    # By now, we assume a single replica of monitoringdb. To be updated in future releases.
-    kubectl wait --namespace $TFS_K8S_NAMESPACE \
-        --for=jsonpath='{.status.readyReplicas}'=1 --timeout=300s statefulset/monitoringdb
-    printf "\n"
-fi
-
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Waiting for '$COMPONENT' component..."
     COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
@@ -272,36 +289,19 @@ done

 if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
     echo "Configuring WebUI DataStores and Dashboards..."
-    sleep 3
-
-    # INFLUXDB_HOST="monitoringservice"
-    # INFLUXDB_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service/monitoringservice -o jsonpath='{.spec.ports[?(@.name=="influxdb")].port}')
-    # INFLUXDB_URL="http://${INFLUXDB_HOST}:${INFLUXDB_PORT}"
-    # INFLUXDB_USER=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_USER}' | base64 --decode)
-    # INFLUXDB_PASSWORD=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_PASSWORD}' | base64 --decode)
-    # INFLUXDB_DATABASE=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_DB}' | base64 --decode)
+    sleep 5

     # Exposed through the ingress controller "tfs-ingress"
-    GRAFANA_HOSTNAME="127.0.0.1"
-    GRAFANA_PORT="80"
-    GRAFANA_BASEURL="/grafana"
+    GRAFANA_URL="127.0.0.1:80/grafana"

     # Default Grafana credentials
     GRAFANA_USERNAME="admin"
     GRAFANA_PASSWORD="admin"

-    # Default Grafana API URL
-    GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
-
-    # Updated Grafana API URL
-    GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
-
-    echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
-
-    echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
-
     # Configure Grafana Admin Password
     # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
+    GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}"
+    echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
     curl -X PUT -H "Content-Type: application/json" -d '{
         "oldPassword": "'${GRAFANA_PASSWORD}'",
         "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
@@ -309,16 +309,21 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     }' ${GRAFANA_URL_DEFAULT}/api/user/password
     echo

+    # Updated Grafana API URL
+    GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}"
+    echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
+
     # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
     # TODO: replace user, password and database by variables to be saved
+    QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}"
     echo "Creating a datasource..."
     curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
         "access"   : "proxy",
         "type"     : "postgres",
-        "name"     : "monitoringdb",
-        "url"      : "monitoringservice:8812",
-        "database" : "monitoring",
-        "user"     : "admin",
+        "name"     : "questdb",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE}'",
+        "user"     : "'${QDB_USERNAME}'",
         "basicAuth": false,
         "isDefault": true,
         "jsonData" : {
@@ -333,9 +338,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
             "tlsConfigurationMethod": "file-path",
             "tlsSkipVerify"         : true
         },
-        "secureJsonData": {
-            "password": "quest"
-        }
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
     }' ${GRAFANA_URL_UPDATED}/api/datasources
     echo

@@ -352,4 +355,3 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     printf "\n\n"
 fi
-
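The curl calls above drive the Grafana HTTP API. The same datasource provisioning, sketched in Python with the requests library for readability; values mirror the script's defaults and the jsonData block is trimmed to the fields visible in the hunk:

```python
import requests

# Grafana is exposed at /grafana through the "tfs-ingress" ingress; the admin
# password has already been rotated to TFS_GRAFANA_PASSWORD ("admin123+" by default).
grafana_url = 'http://admin:admin123+@127.0.0.1:80/grafana'

datasource = {
    'access'   : 'proxy',
    'type'     : 'postgres',                                   # QuestDB speaks the PostgreSQL wire protocol
    'name'     : 'questdb',
    'url'      : 'questdb-public.qdb.svc.cluster.local:8812',  # METRICSDB_HOSTNAME:QDB_SQL_PORT
    'database' : 'tfs_monitoring',                             # QDB_TABLE
    'user'     : 'admin',                                      # QDB_USERNAME
    'basicAuth': False,
    'isDefault': True,
    'jsonData' : {'tlsConfigurationMethod': 'file-path', 'tlsSkipVerify': True},
    'secureJsonData': {'password': 'quest'},                   # QDB_PASSWORD
}
response = requests.post(grafana_url + '/api/datasources', json=datasource)
response.raise_for_status()  # 200 OK means the datasource was created
```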
diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml
index b5f3042ba068153856d42f15c174a80ebdc6f266..77e62518d950abcfe8c1ae832a4a3a7e552065ab 100644
--- a/manifests/monitoringservice.yaml
+++ b/manifests/monitoringservice.yaml
@@ -12,42 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: monitoringdb
-spec:
-  selector:
-    matchLabels:
-      app: monitoringservice
-  serviceName: "monitoringservice"
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: monitoringservice
-    spec:
-      terminationGracePeriodSeconds: 5
-      restartPolicy: Always
-      containers:
-        - name: metricsdb
-          image: questdb/questdb
-          ports:
-            - name: http
-              containerPort: 9000
-              protocol: TCP
-            - name: influxdb
-              containerPort: 9009
-              protocol: TCP
-            - name: postgre
-              containerPort: 8812
-              protocol: TCP
-          env:
-            - name: QDB_CAIRO_COMMIT_LAG
-              value: "1000"
-            - name: QDB_CAIRO_MAX_UNCOMMITTED_ROWS
-              value: "100000"
----
 apiVersion: apps/v1
 kind: Deployment
 metadata:
@@ -63,29 +27,19 @@ spec:
         app: monitoringservice
     spec:
       terminationGracePeriodSeconds: 5
-      restartPolicy: Always
       containers:
         - name: server
           image: labs.etsi.org:5050/tfs/controller/monitoring:latest
           imagePullPolicy: Always
           ports:
-            - name: grpc
-              containerPort: 7070
-              protocol: TCP
-            - name: metrics
-              containerPort: 9192
-              protocol: TCP
+            - containerPort: 7070
+            - containerPort: 9192
           env:
             - name: LOG_LEVEL
               value: "INFO"
-            - name: METRICSDB_HOSTNAME
-              value: "monitoringservice"
-            - name: METRICSDB_ILP_PORT
-              value: "9009"
-            - name: METRICSDB_REST_PORT
-              value: "9000"
-            - name: METRICSDB_TABLE
-              value: "monitoring"
+          envFrom:
+            - secretRef:
+                name: qdb-data
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:7070"]
@@ -94,11 +48,11 @@ spec:
               command: ["/bin/grpc_health_probe", "-addr=:7070"]
           resources:
             requests:
-              cpu: 250m
-              memory: 512Mi
+              cpu: 50m
+              memory: 64Mi
             limits:
-              cpu: 700m
-              memory: 1024Mi
+              cpu: 500m
+              memory: 512Mi
---
 apiVersion: v1
 kind: Service
@@ -115,41 +69,7 @@ spec:
       protocol: TCP
       port: 7070
       targetPort: 7070
-    - name: http
-      protocol: TCP
-      port: 9000
-      targetPort: 9000
-    - name: influxdb
-      protocol: TCP
-      port: 9009
-      targetPort: 9009
-    - name: postgre
-      protocol: TCP
-      port: 8812
-      targetPort: 8812
     - name: metrics
       protocol: TCP
       port: 9192
       targetPort: 9192
-
----
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: access-monitoring
-spec:
-  podSelector:
-    matchLabels:
-      app: monitoringservice
-  ingress:
-  - from: []
-    ports:
-    - port: 7070
-    - port: 8812
-  - from:
-    - podSelector:
-        matchLabels:
-          app: monitoringservice
-    ports:
-    - port: 9009
-    - port: 9000
diff --git a/manifests/questdb/manifest.yaml b/manifests/questdb/manifest.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1da4bf2c5cd2edafc8cd07899ffe742aa07892a3
--- /dev/null
+++ b/manifests/questdb/manifest.yaml
@@ -0,0 +1,67 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: questdb
+spec:
+  selector:
+    matchLabels:
+      app: questdb
+  serviceName: "questdb-public"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: questdb
+    spec:
+      terminationGracePeriodSeconds: 5
+      restartPolicy: Always
+      containers:
+        - name: metricsdb
+          image: questdb/questdb
+          ports:
+            - containerPort: 9000
+            - containerPort: 9009
+            - containerPort: 8812
+          env:
+            - name: QDB_CAIRO_COMMIT_LAG
+              value: "1000"
+            - name: QDB_CAIRO_MAX_UNCOMMITTED_ROWS
+              value: "100000"
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: questdb-public
+  labels:
+    app: questdb
+spec:
+  type: ClusterIP
+  selector:
+    app: questdb
+  ports:
+    - name: http
+      protocol: TCP
+      port: 9000
+      targetPort: 9000
+    - name: ilp
+      protocol: TCP
+      port: 9009
+      targetPort: 9009
+    - name: sql
+      protocol: TCP
+      port: 8812
+      targetPort: 8812
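For reference, port 9009 on the questdb-public service accepts InfluxDB Line Protocol (ILP), which is what the Monitoring component uses to insert KPI samples. A hand-rolled illustrative write, assuming the ingress mapping created by deploy/qdb.sh; table and column names follow the Monitoring schema, other values are made up:

```python
import socket, time

# One ILP record: table name, comma-separated symbol (tag) columns, a space,
# field columns, a space, and an optional timestamp in nanoseconds.
line = 'tfs_monitoring,kpi_id=1,device_id=dev1,device_name=R1 kpi_value=42.0 {:d}\n'.format(time.time_ns())

with socket.create_connection(('127.0.0.1', 9009)) as sock:
    sock.sendall(line.encode('utf-8'))
```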
diff --git a/my_deploy.sh b/my_deploy.sh
index 276b4eead8f3f99c07a30d34eaa6cb0145310741..4c5475da58ed8ab19e7e023c76d92a21d6d4f6dd 100644
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -37,7 +37,9 @@ export TFS_GRAFANA_PASSWORD="admin123+"
 # Disable skip-build flag to rebuild the Docker images.
 export TFS_SKIP_BUILD=""

+
 # ----- CockroachDB ------------------------------------------------------------
+
 # Set the namespace where CockroackDB will be deployed.
 export CRDB_NAMESPACE="crdb"

@@ -60,9 +62,32 @@ export CRDB_DROP_DATABASE_IF_EXISTS=""
 # Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""

+
 # ----- NATS -------------------------------------------------------------------
+
 # Set the namespace where NATS will be deployed.
 export NATS_NAMESPACE="nats"

 # Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# If not already set, set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
+
+# If not already set, set the database username to be used by Monitoring.
+export QDB_USERNAME=${QDB_USERNAME:-"admin"}
+
+# If not already set, set the database user's password to be used by Monitoring.
+export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
+
+# If not already set, set the table name to be used by Monitoring.
+export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+
+## If not already set, disable flag for dropping table if exists.
+#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""}
+
+# If not already set, disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py
index cde8751b417072f3f0de53217dab99308ea882f3..9138da7c40809c2214a1c24f43843ab7e1333e95 100644
--- a/src/context/service/database/Device.py
+++ b/src/context/service/database/Device.py
@@ -19,9 +19,10 @@ from sqlalchemy.orm import Session, sessionmaker
 from sqlalchemy_cockroachdb import run_transaction
 from typing import Dict, List, Optional, Set, Tuple
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
-from common.proto.context_pb2 import Device, DeviceId
+from common.proto.context_pb2 import Device, DeviceId, TopologyId
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Device import json_device_id
+from context.service.database.uuids.Topology import topology_get_uuid
 from .models.DeviceModel import DeviceModel
 from .models.EndPointModel import EndPointModel
 from .models.TopologyModel import TopologyDeviceModel
@@ -73,6 +74,15 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:

     topology_uuids : Set[str] = set()
     related_topologies : List[Dict] = list()
+
+    # By default, always add device to default Context/Topology
+    _,topology_uuid = topology_get_uuid(TopologyId(), allow_random=False, allow_default=True)
+    related_topologies.append({
+        'topology_uuid': topology_uuid,
+        'device_uuid'  : device_uuid,
+    })
+    topology_uuids.add(topology_uuid)
+
     endpoints_data : List[Dict] = list()
     for i, endpoint in enumerate(request.device_endpoints):
         endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 5b537609b89bde3fe5812662379c629665ab56aa..5cabaea76972a799341541ce01ebc38628e834f5 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -27,7 +27,7 @@ from .driver_api.DriverInstanceCache import DriverInstanceCache, get_driver
 from .monitoring.MonitoringLoops import MonitoringLoops
 from .Tools import (
     check_connect_rules, check_no_endpoints, compute_rules_to_add_delete, configure_rules, deconfigure_rules,
-    populate_config_rules, populate_endpoints, populate_initial_config_rules, subscribe_kpi, unsubscribe_kpi)
+    populate_config_rules, populate_endpoint_monitoring_resources, populate_endpoints, populate_initial_config_rules, subscribe_kpi, unsubscribe_kpi)

 LOGGER = logging.getLogger(__name__)

@@ -86,6 +86,11 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
                 raise OperationFailedException('AddDevice', extra_details=errors)

             device_id = context_client.SetDevice(device)
+
+            # Update endpoint monitoring resources with UUIDs
+            device_with_uuids = context_client.GetDevice(device_id)
+            populate_endpoint_monitoring_resources(device_with_uuids, self.monitoring_loops)
+
             return device_id
         finally:
             self.mutex_queues.signal_done(device_uuid)
diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py
index 9245e08a80fab36372dd008b4ed4f6b7e0d2330a..05610e1d2c2522bc167d43c7d0803f00e2472c9b 100644
--- a/src/device/service/Tools.py
+++ b/src/device/service/Tools.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import json
+import json, logging
 from typing import Any, Dict, List, Tuple, Union
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException
@@ -27,6 +27,8 @@ from .Errors import (
     ERROR_BAD_ENDPOINT, ERROR_DELETE, ERROR_GET, ERROR_GET_INIT, ERROR_MISSING_KPI, ERROR_SAMPLETYPE,
     ERROR_SET, ERROR_SUBSCRIBE, ERROR_UNSUBSCRIBE)

+LOGGER = logging.getLogger(__name__)
+
 def check_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]:
     connection_config_rules = dict()
     unexpected_config_rules = list()
@@ -107,6 +109,21 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon

     return errors

+def populate_endpoint_monitoring_resources(device_with_uuids : Device, monitoring_loops : MonitoringLoops) -> None:
+    device_uuid = device_with_uuids.device_id.device_uuid.uuid
+
+    for endpoint in device_with_uuids.device_endpoints:
+        endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+        endpoint_name = endpoint.name
+        kpi_sample_types = endpoint.kpi_sample_types
+        for kpi_sample_type in kpi_sample_types:
+            monitor_resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_uuid, kpi_sample_type)
+            if monitor_resource_key is not None: continue
+
+            monitor_resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_name, kpi_sample_type)
+            if monitor_resource_key is None: continue
+            monitoring_loops.add_resource_key(device_uuid, endpoint_uuid, kpi_sample_type, monitor_resource_key)
+
 def _raw_config_rules_to_grpc(
     device_uuid : str, device_config : DeviceConfig, error_template : str, config_action : ConfigActionEnum,
     raw_config_rules : List[Tuple[str, Union[Any, Exception, None]]]
@@ -118,6 +135,7 @@ def _raw_config_rules_to_grpc(
             errors.append(error_template.format(device_uuid, str(resource_key), str(resource_value)))
             continue

+        if resource_value is None: continue
         resource_value = json.loads(resource_value) if isinstance(resource_value, str) else resource_value
         resource_value = {field_name : (field_value, False) for field_name,field_value in resource_value.items()}
         update_config_rule_custom(device_config.config_rules, resource_key, resource_value, new_action=config_action)
@@ -202,11 +220,12 @@ def subscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loo
     resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_uuid, kpi_sample_type)
     if resource_key is None:
         kpi_sample_type_name = KpiSampleType.Name(kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
-        return [
-            ERROR_SAMPLETYPE.format(
-                str(device_uuid), str(endpoint_uuid), str(kpi_sample_type), str(kpi_sample_type_name)
-            )
-        ]
+        MSG = ERROR_SAMPLETYPE.format(
+            str(device_uuid), str(endpoint_uuid), str(kpi_sample_type), str(kpi_sample_type_name)
+        )
+        LOGGER.warning('{:s} Supported Device-Endpoint-KpiSampleType items: {:s}'.format(
+            MSG, str(monitoring_loops.get_all_resource_keys())))
+        return [MSG]

     sampling_duration = request.sampling_duration_s # seconds
     sampling_interval = request.sampling_interval_s # seconds
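Context assigns the endpoint UUIDs when the device is stored, but the driver registers monitoring resource keys under endpoint names; populate_endpoint_monitoring_resources bridges the two by aliasing each name-keyed entry under the UUID. The core of that re-keying, isolated as a sketch — the dict stands in for MonitoringLoops' internal state, and all names/values are illustrative:

```python
from typing import Dict, Tuple

# (device_uuid, endpoint_uuid_or_name, kpi_sample_type) -> driver resource key
resource_keys : Dict[Tuple[str, str, int], str] = {
    ('dev1', 'eth0', 101): '/interface[eth0]/state/counters/in-octets',
}

def alias_endpoint_uuid(device_uuid : str, endpoint_uuid : str, endpoint_name : str, sample_type : int) -> None:
    if (device_uuid, endpoint_uuid, sample_type) in resource_keys: return  # already aliased
    key = resource_keys.get((device_uuid, endpoint_name, sample_type))
    if key is None: return                                                 # nothing registered under the name
    resource_keys[(device_uuid, endpoint_uuid, sample_type)] = key

alias_endpoint_uuid('dev1', '40ab-uuid-of-eth0', 'eth0', 101)
assert ('dev1', '40ab-uuid-of-eth0', 101) in resource_keys
```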
diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py
index 4f5effce0a8b6156ce99a73b49b71f157d891286..5d2ac6d039bec76dc3f1a68e92362c327bce83dd 100644
--- a/src/device/service/drivers/emulated/EmulatedDriver.py
+++ b/src/device/service/drivers/emulated/EmulatedDriver.py
@@ -29,7 +29,7 @@ from .Tools import compose_resource_endpoint

 LOGGER = logging.getLogger(__name__)

-RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'.*\/interface\[([^\]]+)\].*')
+RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'^\/interface\[([^\]]+)\].*')

 HISTOGRAM_BUCKETS = (
     # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
diff --git a/src/device/service/monitoring/MonitoringLoops.py b/src/device/service/monitoring/MonitoringLoops.py
index 5763951fb2075e1975688eda0e49d24e10b0f697..d18f0dc7e54887aa879e44199fc2e49467a33c8d 100644
--- a/src/device/service/monitoring/MonitoringLoops.py
+++ b/src/device/service/monitoring/MonitoringLoops.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import logging, queue, threading
+import copy, logging, queue, threading
 from typing import Dict, Optional, Tuple, Union
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.proto.monitoring_pb2 import Kpi
@@ -93,6 +93,10 @@ class MonitoringLoops:
             key = (device_uuid, endpoint_uuid, kpi_sample_type)
             return self._device_endpoint_sampletype__to__resource_key.get(key)

+    def get_all_resource_keys(self) -> Dict[Tuple[str, str, int], str]:
+        with self._lock_device_endpoint:
+            return copy.deepcopy(self._device_endpoint_sampletype__to__resource_key)
+
     def remove_resource_key(
         self, device_uuid : str, endpoint_uuid : str, kpi_sample_type : KpiSampleType
     ) -> None:
diff --git a/src/monitoring/service/EventTools.py b/src/monitoring/service/EventTools.py
index 221a0ddbfdbb65b1a908e134cc25f55e235b7564..cdf8afc04c8c714bcdc2517e13063d75bc837df2 100644
--- a/src/monitoring/service/EventTools.py
+++ b/src/monitoring/service/EventTools.py
@@ -12,18 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from typing import Dict
 import grpc, logging, queue, threading
 from common.method_wrappers.ServiceExceptions import ServiceException
 from common.proto import monitoring_pb2
-from common.proto.context_pb2 import Empty, EventTypeEnum
+from common.proto.context_pb2 import DeviceOperationalStatusEnum, Empty, EventTypeEnum
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from context.client.ContextClient import ContextClient
 from monitoring.client.MonitoringClient import MonitoringClient
 from monitoring.service.MonitoringServiceServicerImpl import LOGGER
+from monitoring.service.NameMapping import NameMapping

 LOGGER = logging.getLogger(__name__)

+DEVICE_OP_STATUS_UNDEFINED   = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED
+DEVICE_OP_STATUS_DISABLED    = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+DEVICE_OP_STATUS_ENABLED     = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+DEVICE_OP_STATUS_NOT_ENABLED = {DEVICE_OP_STATUS_UNDEFINED, DEVICE_OP_STATUS_DISABLED}
+KPISAMPLETYPE_UNKNOWN        = KpiSampleType.KPISAMPLETYPE_UNKNOWN
+
 class EventsDeviceCollector:
-    def __init__(self) -> None: # pylint: disable=redefined-outer-name
+    def __init__(self, name_mapping : NameMapping) -> None: # pylint: disable=redefined-outer-name
         self._events_queue = queue.Queue()

         self._context_client_grpc = ContextClient()
@@ -34,6 +43,9 @@ class EventsDeviceCollector:

         self._device_thread = threading.Thread(target=self._collect, args=(self._device_stream,), daemon=False)

+        self._device_to_state : Dict[str, DeviceOperationalStatusEnum] = dict()
+        self._name_mapping = name_mapping
+
     def grpc_server_on(self):
         try:
             grpc.channel_ready_future(self._channel).result(timeout=15)
@@ -52,7 +64,7 @@ class EventsDeviceCollector:
     def start(self):
         try:
             self._device_thread.start()
-        except RuntimeError as e:
+        except RuntimeError:
             LOGGER.exception('Start EventTools exception')

     def get_event(self, block : bool = True, timeout : float = 0.1):
@@ -71,29 +83,51 @@ class EventsDeviceCollector:
             try:
                 event = self.get_event(block=True, timeout=0.5)

-                if event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE:
-                    device = self._context_client.GetDevice(event.device_id)
-                    for j,end_point in enumerate(device.device_endpoints):
-                        #for i, value in enumerate(kpi_sample_types_pb2.KpiSampleType.values()):
-                        for i, value in enumerate(end_point.kpi_sample_types):
-                            #if value == kpi_sample_types_pb2.KpiSampleType.KPISAMPLETYPE_UNKNOWN: continue
+                event_type = event.event.event_type
+                device_uuid = event.device_id.device_uuid.uuid
+                if event_type in {EventTypeEnum.EVENTTYPE_REMOVE}:
+                    self._device_to_state.pop(device_uuid, None)
+                    continue
+
+                if event_type not in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+                    # Unknown event type
+                    continue
+
+                device = self._context_client.GetDevice(event.device_id)
+                self._name_mapping.set_device_name(device_uuid, device.name)
+
+                old_operational_status = self._device_to_state.get(device_uuid, DEVICE_OP_STATUS_UNDEFINED)
+                device_was_not_enabled = (old_operational_status in DEVICE_OP_STATUS_NOT_ENABLED)

-                            kpi_descriptor = monitoring_pb2.KpiDescriptor()
+                new_operational_status = device.device_operational_status
+                device_is_enabled = (new_operational_status == DEVICE_OP_STATUS_ENABLED)
+                self._device_to_state[device_uuid] = new_operational_status

-                            kpi_descriptor.kpi_description = device.device_type
-                            kpi_descriptor.kpi_sample_type = value
-                            #kpi_descriptor.service_id.service_uuid.uuid = ""
-                            kpi_descriptor.device_id.CopyFrom(device.device_id)
-                            kpi_descriptor.endpoint_id.CopyFrom(end_point.endpoint_id)
+                activate_monitoring = device_was_not_enabled and device_is_enabled
+                if not activate_monitoring:
+                    # device is not ready for monitoring
+                    continue

-                            kpi_id = self._monitoring_client.SetKpi(kpi_descriptor)
-                            kpi_id_list.append(kpi_id)
+                for endpoint in device.device_endpoints:
+                    endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+                    self._name_mapping.set_endpoint_name(endpoint_uuid, endpoint.name)
+
+                    for value in endpoint.kpi_sample_types:
+                        if value == KPISAMPLETYPE_UNKNOWN: continue
+
+                        kpi_descriptor = monitoring_pb2.KpiDescriptor()
+                        kpi_descriptor.kpi_description = device.device_type
+                        kpi_descriptor.kpi_sample_type = value
+                        kpi_descriptor.device_id.CopyFrom(device.device_id)       # pylint: disable=no-member
+                        kpi_descriptor.endpoint_id.CopyFrom(endpoint.endpoint_id) # pylint: disable=no-member
+
+                        kpi_id = self._monitoring_client.SetKpi(kpi_descriptor)
+                        kpi_id_list.append(kpi_id)
             except queue.Empty:
                 break

         return kpi_id_list
-    except ServiceException as e:
+    except ServiceException:
         LOGGER.exception('ListenEvents exception')
-    except Exception as e:  # pragma: no cover
+    except Exception:       # pragma: no cover # pylint: disable=broad-except
         LOGGER.exception('ListenEvents exception')
-
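The rewritten listener keeps a per-device operational-status cache and only creates KPIs on the transition where a device becomes ENABLED, so repeated UPDATE events no longer duplicate KPI descriptors. The transition predicate in isolation (a sketch; the enum values mirror DeviceOperationalStatusEnum):

```python
UNDEFINED, DISABLED, ENABLED = 0, 1, 2  # DeviceOperationalStatusEnum values
NOT_ENABLED = {UNDEFINED, DISABLED}

device_to_state = {}

def should_activate_monitoring(device_uuid : str, new_status : int) -> bool:
    old_status = device_to_state.get(device_uuid, UNDEFINED)
    device_to_state[device_uuid] = new_status
    return (old_status in NOT_ENABLED) and (new_status == ENABLED)

assert should_activate_monitoring('dev1', ENABLED) is True   # first enable: create KPIs
assert should_activate_monitoring('dev1', ENABLED) is False  # later updates: no duplicates
```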
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index 1d3888d5348bdbe2995f077310ca448827290382..a386d5f184694c87493681e7a31f7bc06301e50d 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -23,14 +23,18 @@ import datetime
 from common.tools.timestamp.Converters import timestamp_float_to_string, timestamp_utcnow_to_float
 import psycopg2

+from monitoring.service.NameMapping import NameMapping
+
 LOGGER = logging.getLogger(__name__)

 class MetricsDB():
-    def __init__(self, host, ilp_port=9009, rest_port=9000, table="monitoring", commit_lag_ms=1000, retries=10,
-                 postgre=False, postgre_port=8812, postgre_user='admin', postgre_password='quest'):
+    def __init__(self, host, name_mapping : NameMapping, ilp_port=9009, rest_port=9000, table="monitoring",
+                 commit_lag_ms=1000, retries=10, postgre=False, postgre_port=8812, postgre_user='admin',
+                 postgre_password='quest'):
         try:
             self.host = host
+            self.name_mapping = name_mapping
             self.ilp_port = int(ilp_port)
             self.rest_port = rest_port
             self.table = table
@@ -85,7 +89,9 @@ class MetricsDB():
                 '(kpi_id SYMBOL,' \
                 'kpi_sample_type SYMBOL,' \
                 'device_id SYMBOL,' \
+                'device_name SYMBOL,' \
                 'endpoint_id SYMBOL,' \
+                'endpoint_name SYMBOL,' \
                 'service_id SYMBOL,' \
                 'slice_id SYMBOL,' \
                 'connection_id SYMBOL,' \
@@ -100,6 +106,9 @@ class MetricsDB():
             raise Exception

     def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, slice_id, connection_id, kpi_value):
+        device_name = self.name_mapping.get_device_name(device_id) or ''
+        endpoint_name = self.name_mapping.get_endpoint_name(endpoint_id) or ''
+
         counter = 0
         while (counter < self.retries):
             try:
@@ -110,7 +119,9 @@ class MetricsDB():
                         'kpi_id': kpi_id,
                         'kpi_sample_type': kpi_sample_type,
                         'device_id': device_id,
+                        'device_name': device_name,
                         'endpoint_id': endpoint_id,
+                        'endpoint_name': endpoint_name,
                         'service_id': service_id,
                         'slice_id': slice_id,
                         'connection_id': connection_id,},
diff --git a/src/monitoring/service/MonitoringService.py b/src/monitoring/service/MonitoringService.py
index e2cbe2862894aec7b571ae857ad4c4fffa3c94c6..10611768a8cc91c45637f676536d1840114d8f33 100644
--- a/src/monitoring/service/MonitoringService.py
+++ b/src/monitoring/service/MonitoringService.py
@@ -17,12 +17,13 @@ from common.Settings import get_service_port_grpc
 from common.proto.monitoring_pb2_grpc import add_MonitoringServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from monitoring.service.MonitoringServiceServicerImpl import MonitoringServiceServicerImpl
+from monitoring.service.NameMapping import NameMapping

 class MonitoringService(GenericGrpcService):
-    def __init__(self, cls_name: str = __name__) -> None:
+    def __init__(self, name_mapping : NameMapping, cls_name: str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.MONITORING)
         super().__init__(port, cls_name=cls_name)
-        self.monitoring_servicer = MonitoringServiceServicerImpl()
+        self.monitoring_servicer = MonitoringServiceServicerImpl(name_mapping)

     def install_servicers(self):
         add_MonitoringServiceServicer_to_server(self.monitoring_servicer, self.server)
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index c2bceefd794e3c5bd6acb35e41cef78dc1c205e9..bf9e7cabdd812680a97754da80b0bcb0ba1722e3 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -17,8 +17,6 @@
 from queue import Queue
 from typing import Iterator

-from common.Constants import ServiceNameEnum
-from common.Settings import get_setting, get_service_port_grpc, get_service_host
 from common.logger import getJSONLogger
 from common.proto.context_pb2 import Empty
 from common.proto.device_pb2 import MonitoringSettings
@@ -36,6 +34,7 @@ from device.client.DeviceClient import DeviceClient
 from prometheus_client import Counter, Summary

 from monitoring.service.AlarmManager import AlarmManager
+from monitoring.service.NameMapping import NameMapping
 from monitoring.service.SubscriptionManager import SubscriptionManager

 LOGGER = getJSONLogger('monitoringservice-server')
@@ -50,22 +49,15 @@ METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT")
 METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT")
 METRICSDB_TABLE = os.environ.get("METRICSDB_TABLE")

-DEVICESERVICE_SERVICE_HOST = get_setting('DEVICESERVICE_SERVICE_HOST', default=get_service_host(ServiceNameEnum.DEVICE))
-DEVICESERVICE_SERVICE_PORT_GRPC = get_setting('DEVICESERVICE_SERVICE_PORT_GRPC',
-                                              default=get_service_port_grpc(ServiceNameEnum.DEVICE))
-
-
 class MonitoringServiceServicerImpl(MonitoringServiceServicer):
-    def __init__(self):
+    def __init__(self, name_mapping : NameMapping):
         LOGGER.info('Init monitoringService')

         # Init sqlite monitoring db
         self.management_db = ManagementDBTools.ManagementDB('monitoring.db')
-        self.deviceClient = DeviceClient(host=DEVICESERVICE_SERVICE_HOST,
-                                         port=DEVICESERVICE_SERVICE_PORT_GRPC)  # instantiate the client
-
-        self.metrics_db = MetricsDBTools.MetricsDB(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT,
-                                                   METRICSDB_TABLE)
+        self.deviceClient = DeviceClient()
+        self.metrics_db = MetricsDBTools.MetricsDB(
+            METRICSDB_HOSTNAME, name_mapping, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
         self.subs_manager = SubscriptionManager(self.metrics_db)
         self.alarm_manager = AlarmManager(self.metrics_db)
         LOGGER.info('MetricsDB initialized')
diff --git a/src/monitoring/service/NameMapping.py b/src/monitoring/service/NameMapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..57d7bfd4e0699a1998fa1b17bdbab863c193e984
--- /dev/null
+++ b/src/monitoring/service/NameMapping.py
@@ -0,0 +1,46 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+from typing import Dict, Optional
+
+class NameMapping:
+    def __init__(self) -> None:
+        self.__lock = threading.Lock()
+        self.__device_to_name   : Dict[str, str] = dict()
+        self.__endpoint_to_name : Dict[str, str] = dict()
+
+    def get_device_name(self, device_uuid : str) -> Optional[str]:
+        with self.__lock:
+            return self.__device_to_name.get(device_uuid)
+
+    def get_endpoint_name(self, endpoint_uuid : str) -> Optional[str]:
+        with self.__lock:
+            return self.__endpoint_to_name.get(endpoint_uuid)
+
+    def set_device_name(self, device_uuid : str, device_name : str) -> None:
+        with self.__lock:
+            self.__device_to_name[device_uuid] = device_name
+
+    def set_endpoint_name(self, endpoint_uuid : str, endpoint_name : str) -> None:
+        with self.__lock:
+            self.__endpoint_to_name[endpoint_uuid] = endpoint_name
+
+    def delete_device_name(self, device_uuid : str) -> None:
+        with self.__lock:
+            self.__device_to_name.pop(device_uuid, None)
+
+    def delete_endpoint_name(self, endpoint_uuid : str) -> None:
+        with self.__lock:
+            self.__endpoint_to_name.pop(endpoint_uuid, None)
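NameMapping is a small lock-guarded cache shared by the gRPC servicer (which writes rows) and the events collector (which learns names from Context events). Typical interaction, matching how MetricsDB.write_KPI consumes it; note that unknown UUIDs degrade to None, which write_KPI turns into an empty string (the UUIDs below are illustrative):

```python
from monitoring.service.NameMapping import NameMapping

name_mapping = NameMapping()
name_mapping.set_device_name('dev1-uuid', 'R1')  # done by EventsDeviceCollector on device events

device_name   = name_mapping.get_device_name('dev1-uuid') or ''   # -> 'R1'
endpoint_name = name_mapping.get_endpoint_name('ep1-uuid') or ''  # -> '' (not seen yet)
assert (device_name, endpoint_name) == ('R1', '')
```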
diff --git a/src/monitoring/service/__main__.py b/src/monitoring/service/__main__.py
index 78764ea64e39c48d927901ad88e7cff569e7447b..5483bf5639dd6e4d157c9e8c35d330af492896ef 100644
--- a/src/monitoring/service/__main__.py
+++ b/src/monitoring/service/__main__.py
@@ -21,6 +21,7 @@ from common.Settings import (
 from common.proto import monitoring_pb2
 from .EventTools import EventsDeviceCollector
 from .MonitoringService import MonitoringService
+from .NameMapping import NameMapping

 terminate = threading.Event()
 LOGGER = None
@@ -29,10 +30,10 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
     LOGGER.warning('Terminate signal received')
     terminate.set()

-def start_monitoring():
+def start_monitoring(name_mapping : NameMapping):
     LOGGER.info('Start Monitoring...',)

-    events_collector = EventsDeviceCollector()
+    events_collector = EventsDeviceCollector(name_mapping)
     events_collector.start()

     # TODO: redesign this method to be more clear and clean
@@ -79,11 +80,13 @@ def main():
     metrics_port = get_metrics_port()
     start_http_server(metrics_port)

+    name_mapping = NameMapping()
+
     # Starting monitoring service
-    grpc_service = MonitoringService()
+    grpc_service = MonitoringService(name_mapping)
     grpc_service.start()

-    start_monitoring()
+    start_monitoring(name_mapping)

     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=0.1): pass
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index d892b98b8814ff3a23165fd6f893e8d136dba1e4..ed8f18558d3c7bca7850f7a74418bef1d305b7fe 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -26,8 +26,7 @@ from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
 #from common.logger import getJSONLogger
-from common.proto import monitoring_pb2
-from common.proto.context_pb2 import EventTypeEnum, DeviceEvent, Device, Empty
+from common.proto.context_pb2 import DeviceOperationalStatusEnum, EventTypeEnum, DeviceEvent, Device, Empty
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, SubsDescriptor, SubsList, AlarmID, \
@@ -41,11 +40,12 @@ from device.service.DeviceService import DeviceService
 from device.service.driver_api.DriverFactory import DriverFactory
 from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
 from monitoring.client.MonitoringClient import MonitoringClient
-from monitoring.service import ManagementDBTools, MetricsDBTools
 #from monitoring.service.AlarmManager import AlarmManager
 from monitoring.service.EventTools import EventsDeviceCollector
+from monitoring.service.ManagementDBTools import ManagementDB
 from monitoring.service.MetricsDBTools import MetricsDB
 from monitoring.service.MonitoringService import MonitoringService
+from monitoring.service.NameMapping import NameMapping
 #from monitoring.service.SubscriptionManager import SubscriptionManager
 from monitoring.tests.Messages import create_kpi_request, create_kpi_request_d, include_kpi_request, monitor_kpi_request, \
     create_kpi_request_c, kpi_query, subs_descriptor, alarm_descriptor, alarm_subscription #, create_kpi_request_b
@@ -155,7 +155,8 @@ def monitoring_service(
     device_service : DeviceService  # pylint: disable=redefined-outer-name,unused-argument
 ):
     LOGGER.info('Initializing MonitoringService...')
-    _service = MonitoringService()
+    name_mapping = NameMapping()
+    _service = MonitoringService(name_mapping)
     _service.start()

     # yield the server, when test finishes, execution will resume to stop it
@@ -185,14 +186,15 @@ def monitoring_client(monitoring_service : MonitoringService): # pylint: disable

 @pytest.fixture(scope='session')
 def management_db():
-    _management_db = ManagementDBTools.ManagementDB('monitoring.db')
+    _management_db = ManagementDB('monitoring.db')
     return _management_db

 @pytest.fixture(scope='session')
-def metrics_db():
-    _metrics_db = MetricsDBTools.MetricsDB(
-        METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
-    return _metrics_db
+def metrics_db(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name
+    return monitoring_service.monitoring_servicer.metrics_db
+    #_metrics_db = MetricsDBTools.MetricsDB(
+    #    METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
+    #return _metrics_db

 @pytest.fixture(scope='session')
 def subs_scheduler():
@@ -533,11 +535,12 @@ def test_events_tools(
     context_client : ContextClient,         # pylint: disable=redefined-outer-name,unused-argument
     device_client : DeviceClient,           # pylint: disable=redefined-outer-name
     monitoring_client : MonitoringClient,   # pylint: disable=redefined-outer-name,unused-argument
+    metrics_db : MetricsDB,                 # pylint: disable=redefined-outer-name
 ):
     LOGGER.warning('test_get_device_events begin')

     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsDeviceCollector()
+    events_collector = EventsDeviceCollector(metrics_db.name_mapping)
     events_collector.start()

     # ----- Update the object ------------------------------------------------------------------------------------------
@@ -557,12 +560,13 @@ def test_get_device_events(
     context_client : ContextClient,         # pylint: disable=redefined-outer-name,unused-argument
     device_client : DeviceClient,           # pylint: disable=redefined-outer-name
     monitoring_client : MonitoringClient,   # pylint: disable=redefined-outer-name,unused-argument
+    metrics_db : MetricsDB,                 # pylint: disable=redefined-outer-name
 ):
     LOGGER.warning('test_get_device_events begin')

     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsDeviceCollector()
+    events_collector = EventsDeviceCollector(metrics_db.name_mapping)
     events_collector.start()

     # ----- Check create event -----------------------------------------------------------------------------------------
@@ -586,12 +590,13 @@ def test_listen_events(
     context_client : ContextClient,         # pylint: disable=redefined-outer-name,unused-argument
     device_client : DeviceClient,           # pylint: disable=redefined-outer-name
     monitoring_client : MonitoringClient,   # pylint: disable=redefined-outer-name,unused-argument
+    metrics_db : MetricsDB,                 # pylint: disable=redefined-outer-name
 ):
     LOGGER.warning('test_listen_events begin')

     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsDeviceCollector()
+    events_collector = EventsDeviceCollector(metrics_db.name_mapping)
     events_collector.start()

     LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
@@ -600,6 +605,15 @@ def test_listen_events(
     response = device_client.AddDevice(Device(**device_with_connect_rules))
     assert response.device_uuid.uuid == DEVICE_DEV1_UUID

+    LOGGER.info('Activating Device {:s}'.format(DEVICE_DEV1_UUID))
+
+    device = context_client.GetDevice(response)
+    device_with_op_state = Device()
+    device_with_op_state.CopyFrom(device)
+    device_with_op_state.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+    response = context_client.SetDevice(device_with_op_state)
+    assert response.device_uuid.uuid == DEVICE_DEV1_UUID
+
     sleep(1.0)

     kpi_id_list = events_collector.listen_events()
diff --git a/src/service/service/service_handler_api/Tools.py b/src/service/service/service_handler_api/Tools.py
index ebd16a532c4ef4e74a61fd075afe9298755e26fb..90fa6098368633f7e44db37c01b75508d5e6e87a 100644
--- a/src/service/service/service_handler_api/Tools.py
+++ b/src/service/service/service_handler_api/Tools.py
@@ -13,9 +13,10 @@
 # limitations under the License.

 import functools
-from typing import Any, List, Union
+from typing import Any, List, Optional, Tuple, Union
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from common.proto.context_pb2 import Device, EndPoint
+from common.type_checkers.Checkers import chk_length, chk_type

 ACTION_MSG_SET_ENDPOINT    = 'Set EndPoint(device_uuid={:s}, endpoint_uuid={:s}, topology_uuid={:s})'
 ACTION_MSG_DELETE_ENDPOINT = 'Delete EndPoint(device_uuid={:s}, endpoint_uuid={:s}, topology_uuid={:s})'
@@ -51,3 +52,9 @@ def get_endpoint_matching(device : Device, endpoint_uuid_or_name : str) -> EndPo
     device_uuid = device.device_id.device_uuid.uuid
     extra_details = 'Device({:s})'.format(str(device_uuid))
     raise NotFoundException('Endpoint', endpoint_uuid_or_name, extra_details=extra_details)
+
+def get_device_endpoint_uuids(endpoint : Tuple[str, str, Optional[str]]) -> Tuple[str, str]:
+    chk_type('endpoint', endpoint, (tuple, list))
+    chk_length('endpoint', endpoint, min_length=2, max_length=3)
+    device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+    return device_uuid, endpoint_uuid
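get_device_endpoint_uuids factors out the endpoint-tuple validation that was previously inlined in each service handler. Usage, with illustrative UUIDs; the optional third element (the topology UUID) is accepted but currently ignored:

```python
from service.service.service_handler_api.Tools import get_device_endpoint_uuids

device_uuid, endpoint_uuid = get_device_endpoint_uuids(('dev1-uuid', 'ep1-uuid', 'topo-uuid'))
assert (device_uuid, endpoint_uuid) == ('dev1-uuid', 'ep1-uuid')
```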
diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
index f12c9ab984205b9057dd1507114e5bc17d8deaa6..61fa08a26cf0eb776cb94e3998a7cb90cf663e59 100644
--- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py
@@ -17,7 +17,7 @@ from common.tools.object_factory.ConfigRule import json_config_rule_delete, json
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
 def setup_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -38,7 +38,7 @@ def setup_config_rules(
     remote_router    = json_endpoint_settings.get('remote_router', '0.0.0.0')  # '5.5.5.5'
     circuit_id       = json_endpoint_settings.get('circuit_id',    '000'    )  # '111'
 
-    if_cirid_name         = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
     connection_point_id   = 'VC-1'
 
@@ -76,7 +76,7 @@ def setup_config_rules(
     return json_config_rules
 
 def teardown_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -97,7 +97,7 @@ def teardown_config_rules(
     #remote_router    = json_endpoint_settings.get('remote_router', '0.0.0.0')  # '5.5.5.5'
     circuit_id       = json_endpoint_settings.get('circuit_id',    '000'    )  # '111'
 
-    if_cirid_name         = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
     connection_point_id   = 'VC-1'
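The if_cirid_name change above composes interface identifiers from the endpoint name instead of the endpoint UUID, so the generated rules reference names the device actually knows. Illustration with hypothetical values:

# Hypothetical values; real ones come from Context and the endpoint settings.
endpoint_uuid = '3f0c2e1a-55b7-4a3b-9c0d-1b2e3c4d5e6f'  # opaque, device-unfriendly
endpoint_name = 'eth-1/0/1'                             # what the NOS expects
circuit_id    = '111'

if_cirid_name = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
print(if_cirid_name)  # eth-1/0/1.111 (previously: 3f0c2e1a-....111)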
diff --git a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
index 66259d1f636c712bc41f282b4a5d947c57e01fc4..0d4a097cf32cbea7333e4c1693930294e2fb1f52 100644
--- a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
+++ b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py
@@ -17,8 +17,8 @@ from typing import Any, List, Optional, Tuple, Union
 from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
-from common.type_checkers.Checkers import chk_length, chk_type
-from service.service.service_handler_api.Tools import get_endpoint_matching
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -64,16 +64,16 @@ class L2NMEmulatedServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = setup_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
@@ -99,16 +99,16 @@ class L2NMEmulatedServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = teardown_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
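All of the L2NM/L3NM handlers share this per-endpoint loop: each endpoint is processed in its own try/except, and the result list carries True or the raised exception so one bad endpoint does not abort the rest. A minimal sketch of the pattern, with hypothetical resolve/apply helpers standing in for the device lookup and rule installation:

from service.service.service_handler_api.Tools import get_device_endpoint_uuids

def set_endpoints(endpoints, resolve_endpoint, apply_rules):
    results = []
    for endpoint in endpoints:
        try:
            device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
            apply_rules(resolve_endpoint(device_uuid, endpoint_uuid))
            results.append(True)
        except Exception as e:  # pylint: disable=broad-except
            results.append(e)   # collect per-endpoint failures, keep going
    return results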
diff --git a/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py
index f12c9ab984205b9057dd1507114e5bc17d8deaa6..61fa08a26cf0eb776cb94e3998a7cb90cf663e59 100644
--- a/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py
+++ b/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py
@@ -17,7 +17,7 @@ from common.tools.object_factory.ConfigRule import json_config_rule_delete, json
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
 def setup_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -38,7 +38,7 @@ def setup_config_rules(
     remote_router    = json_endpoint_settings.get('remote_router', '0.0.0.0')  # '5.5.5.5'
     circuit_id       = json_endpoint_settings.get('circuit_id',    '000'    )  # '111'
 
-    if_cirid_name         = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
     connection_point_id   = 'VC-1'
 
@@ -76,7 +76,7 @@ def setup_config_rules(
     return json_config_rules
 
 def teardown_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -97,7 +97,7 @@ def teardown_config_rules(
     #remote_router    = json_endpoint_settings.get('remote_router', '0.0.0.0')  # '5.5.5.5'
     circuit_id       = json_endpoint_settings.get('circuit_id',    '000'    )  # '111'
 
-    if_cirid_name         = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id))
+    if_cirid_name         = '{:s}.{:s}'.format(endpoint_name, str(circuit_id))
     network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id))
     connection_point_id   = 'VC-1'
diff --git a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py
index 63442a6b46d3301ff15ee0d4416468f01d2f61a5..b8b30ad742223fb1a2aa4dc1e8a762b9e0fec386 100644
--- a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py
@@ -17,8 +17,8 @@ from typing import Any, List, Optional, Tuple, Union
 from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
-from common.type_checkers.Checkers import chk_length, chk_type
-from service.service.service_handler_api.Tools import get_endpoint_matching
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -64,16 +64,16 @@ class L2NMOpenConfigServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = setup_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
@@ -99,16 +99,16 @@ class L2NMOpenConfigServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = teardown_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
diff --git a/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
index 3a5aff5884c72f1384666a223a3b07da6d4ae4ec..8479670940ef7eda1f5ce51cd42b3dc5e37e1a32 100644
--- a/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
+++ b/src/service/service/service_handlers/l3nm_emulated/ConfigRules.py
@@ -17,7 +17,7 @@ from common.tools.object_factory.ConfigRule import json_config_rule_delete, json
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
 def setup_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -40,7 +40,7 @@ def setup_config_rules(
     vlan_id        = json_endpoint_settings.get('vlan_id',        1        )  # 400
     address_ip     = json_endpoint_settings.get('address_ip',     '0.0.0.0')  # '2.2.2.1'
     address_prefix = json_endpoint_settings.get('address_prefix', 24       )  # 30
-    if_subif_name  = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+    if_subif_name  = '{:s}.{:d}'.format(endpoint_name, vlan_id)
 
     json_config_rules = [
         json_config_rule_set(
@@ -50,18 +50,18 @@ def setup_config_rules(
             #'router_id': router_id, 'address_families': address_families,
         }),
         json_config_rule_set(
-            '/interface[{:s}]'.format(endpoint_uuid), {
-                'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu,
+            '/interface[{:s}]'.format(endpoint_name), {
+                'name': endpoint_name, 'description': network_interface_desc, 'mtu': mtu,
         }),
         json_config_rule_set(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                'name': endpoint_uuid, 'index': sub_interface_index,
+            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), {
+                'name': endpoint_name, 'index': sub_interface_index,
                 'description': network_subinterface_desc, 'vlan_id': vlan_id,
                 'address_ip': address_ip, 'address_prefix': address_prefix,
         }),
         json_config_rule_set(
             '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid,
+                'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_name,
                 'subinterface': sub_interface_index,
         }),
         json_config_rule_set(
@@ -138,7 +138,7 @@ def setup_config_rules(
     return json_config_rules
 
 def teardown_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -157,7 +157,7 @@ def teardown_config_rules(
     #address_ip     = json_endpoint_settings.get('address_ip',     '0.0.0.0')  # '2.2.2.1'
     #address_prefix = json_endpoint_settings.get('address_prefix', 24       )  # 30
 
-    if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+    if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id)
     service_short_uuid = service_uuid.split('-')[-1]
     network_instance_name = '{:s}-NetInst'.format(service_short_uuid)
     #network_interface_desc = '{:s}-NetIf'.format(service_uuid)
@@ -169,12 +169,12 @@ def teardown_config_rules(
             'name': network_instance_name, 'id': if_subif_name,
         }),
         json_config_rule_delete(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                'name': endpoint_uuid, 'index': sub_interface_index,
+            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), {
+                'name': endpoint_name, 'index': sub_interface_index,
         }),
         json_config_rule_delete(
-            '/interface[{:s}]'.format(endpoint_uuid), {
-                'name': endpoint_uuid,
+            '/interface[{:s}]'.format(endpoint_name), {
+                'name': endpoint_name,
         }),
         json_config_rule_delete(
             '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
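The L3NM rules follow the same renaming: interfaces and subinterfaces are keyed by endpoint name, and the resource keys embed it. Illustration with hypothetical values:

# Hypothetical values; real ones come from the endpoint settings tree.
endpoint_name = 'eth-1/0/2'
vlan_id = 400
sub_interface_index = 0

if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id)
print(if_subif_name)  # eth-1/0/2.400
print('/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index))
# /interface[eth-1/0/2]/subinterface[0]  -- the resource key the rule targets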
diff --git a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
index 8a39ed47463d70bf2c2c42cbb9308ba5e072caf4..412023a69c4f59865f907001e53a30648df96d53 100644
--- a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py
@@ -17,8 +17,8 @@ from typing import Any, List, Optional, Tuple, Union
 from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
-from common.type_checkers.Checkers import chk_length, chk_type
-from service.service.service_handler_api.Tools import get_endpoint_matching
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -55,32 +55,25 @@ class L3NMEmulatedServiceHandler(_ServiceHandler):
     def SetEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
     ) -> List[Union[bool, Exception]]:
-        LOGGER.info('[SetEndpoint] endpoints={:s}'.format(str(endpoints)))
-        LOGGER.info('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
-
         chk_type('endpoints', endpoints, list)
         if len(endpoints) == 0: return []
 
         service_uuid = self.__service.service_id.service_uuid.uuid
         settings = self.__settings_handler.get('/settings')
-        LOGGER.info('[SetEndpoint] settings={:s}'.format(str(settings)))
 
         results = []
         for endpoint in endpoints:
-            LOGGER.info('[SetEndpoint] endpoint={:s}'.format(str(endpoint)))
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
-                LOGGER.info('[SetEndpoint] endpoint_settings={:s}'.format(str(endpoint_settings)))
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = setup_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
-                LOGGER.info('[SetEndpoint] json_config_rules={:s}'.format(str(json_config_rules)))
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
@@ -91,16 +84,12 @@ class L3NMEmulatedServiceHandler(_ServiceHandler):
                 LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
                 results.append(e)
 
-        LOGGER.info('[SetEndpoint] results={:s}'.format(str(results)))
         return results
 
     @metered_subclass_method(METRICS_POOL)
     def DeleteEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
     ) -> List[Union[bool, Exception]]:
-        LOGGER.info('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints)))
-        LOGGER.info('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
-
         chk_type('endpoints', endpoints, list)
         if len(endpoints) == 0: return []
 
@@ -110,16 +99,16 @@ class L3NMEmulatedServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = teardown_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
index 3a5aff5884c72f1384666a223a3b07da6d4ae4ec..8479670940ef7eda1f5ce51cd42b3dc5e37e1a32 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
@@ -17,7 +17,7 @@ from common.tools.object_factory.ConfigRule import json_config_rule_delete, json
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
 def setup_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -40,7 +40,7 @@ def setup_config_rules(
     vlan_id        = json_endpoint_settings.get('vlan_id',        1        )  # 400
     address_ip     = json_endpoint_settings.get('address_ip',     '0.0.0.0')  # '2.2.2.1'
     address_prefix = json_endpoint_settings.get('address_prefix', 24       )  # 30
-    if_subif_name  = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+    if_subif_name  = '{:s}.{:d}'.format(endpoint_name, vlan_id)
 
     json_config_rules = [
         json_config_rule_set(
@@ -50,18 +50,18 @@ def setup_config_rules(
             #'router_id': router_id, 'address_families': address_families,
         }),
         json_config_rule_set(
-            '/interface[{:s}]'.format(endpoint_uuid), {
-                'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu,
+            '/interface[{:s}]'.format(endpoint_name), {
+                'name': endpoint_name, 'description': network_interface_desc, 'mtu': mtu,
         }),
         json_config_rule_set(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                'name': endpoint_uuid, 'index': sub_interface_index,
+            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), {
+                'name': endpoint_name, 'index': sub_interface_index,
                 'description': network_subinterface_desc, 'vlan_id': vlan_id,
                 'address_ip': address_ip, 'address_prefix': address_prefix,
         }),
         json_config_rule_set(
             '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid,
+                'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_name,
                 'subinterface': sub_interface_index,
         }),
         json_config_rule_set(
@@ -138,7 +138,7 @@ def setup_config_rules(
     return json_config_rules
 
 def teardown_config_rules(
-    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
@@ -157,7 +157,7 @@ def teardown_config_rules(
     #address_ip     = json_endpoint_settings.get('address_ip',     '0.0.0.0')  # '2.2.2.1'
     #address_prefix = json_endpoint_settings.get('address_prefix', 24       )  # 30
 
-    if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
+    if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id)
     service_short_uuid = service_uuid.split('-')[-1]
     network_instance_name = '{:s}-NetInst'.format(service_short_uuid)
     #network_interface_desc = '{:s}-NetIf'.format(service_uuid)
@@ -169,12 +169,12 @@ def teardown_config_rules(
             'name': network_instance_name, 'id': if_subif_name,
         }),
         json_config_rule_delete(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
-                'name': endpoint_uuid, 'index': sub_interface_index,
+            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), {
+                'name': endpoint_name, 'index': sub_interface_index,
         }),
         json_config_rule_delete(
-            '/interface[{:s}]'.format(endpoint_uuid), {
-                'name': endpoint_uuid,
+            '/interface[{:s}]'.format(endpoint_name), {
+                'name': endpoint_name,
         }),
         json_config_rule_delete(
             '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
diff --git a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
index 3dc98f71b3f64557782b700220d1d3ab84314b4b..7c75ff1c769f418e394092ee60b186665ae6670f 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
@@ -17,8 +17,8 @@ from typing import Any, List, Optional, Tuple, Union
 from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.Device import json_device_id
-from common.type_checkers.Checkers import chk_length, chk_type
-from service.service.service_handler_api.Tools import get_endpoint_matching
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -64,16 +64,16 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = setup_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
@@ -99,16 +99,16 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             try:
-                chk_type('endpoint', endpoint, (tuple, list))
-                chk_length('endpoint', endpoint, min_length=2, max_length=3)
-                device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
 
                 json_config_rules = teardown_config_rules(
-                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings)
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, endpoint_settings)
 
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
diff --git a/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py
index a16f8cdfad5524a45c36502610d615f3b5dbbba4..bfc2122226221c00d1ada545b512f07401360515 100644
--- a/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py
+++ b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py
@@ -19,6 +19,7 @@ from common.proto.context_pb2 import ConfigRule, DeviceId, Service
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -45,8 +46,7 @@ class MicrowaveServiceHandler(_ServiceHandler):
     def SetEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
     ) -> List[Union[bool, Exception]]:
-        LOGGER.info('[SetEndpoint] endpoints={:s}'.format(str(endpoints)))
-        LOGGER.info('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
+        chk_type('endpoints', endpoints, list)
         if len(endpoints) != 2: return []
 
@@ -57,12 +57,21 @@ class MicrowaveServiceHandler(_ServiceHandler):
         results = []
         try:
-            # endpoints are retrieved in the following format --> '/endpoints/endpoint[172.26.60.243:9]'
-            node_id_src, tp_id_src = check_endpoint(endpoints[0][1], service_uuid)
-            node_id_dst, tp_id_dst = check_endpoint(endpoints[1][1], service_uuid)
-
-            device_uuid = endpoints[0][0]
+            device_uuid_src, endpoint_uuid_src = get_device_endpoint_uuids(endpoints[0])
+            device_uuid_dst, endpoint_uuid_dst = get_device_endpoint_uuids(endpoints[1])
+
+            if device_uuid_src != device_uuid_dst:
+                raise Exception('Different Src-Dst devices not supported for now')
+            device_uuid = device_uuid_src
+
             device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+            endpoint_name_src = get_endpoint_matching(device_obj, endpoint_uuid_src).name
+            endpoint_name_dst = get_endpoint_matching(device_obj, endpoint_uuid_dst).name
+
+            # endpoints are retrieved in the following format --> '/endpoints/endpoint[172.26.60.243:9]'
+            node_id_src, tp_id_src = check_endpoint(endpoint_name_src, service_uuid)
+            node_id_dst, tp_id_dst = check_endpoint(endpoint_name_dst, service_uuid)
+
             json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), {
                 'uuid'       : service_uuid,
                 'node_id_src': node_id_src,
@@ -85,8 +94,6 @@ class MicrowaveServiceHandler(_ServiceHandler):
     def DeleteEndpoint(
         self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
     ) -> List[Union[bool, Exception]]:
-        LOGGER.info('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints)))
-        LOGGER.info('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid)))
         chk_type('endpoints', endpoints, list)
         if len(endpoints) != 2: return []
 
@@ -95,8 +102,15 @@ class MicrowaveServiceHandler(_ServiceHandler):
         results = []
         try:
-            device_uuid = endpoints[0][0]
+            device_uuid_src, _ = get_device_endpoint_uuids(endpoints[0])
+            device_uuid_dst, _ = get_device_endpoint_uuids(endpoints[1])
+
+            if device_uuid_src != device_uuid_dst:
+                raise Exception('Different Src-Dst devices not supported for now')
+            device_uuid = device_uuid_src
+
             device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+
             json_config_rule = json_config_rule_delete('/services/service[{:s}]'.format(service_uuid), {
                 'uuid': service_uuid
             })
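check_endpoint() is not shown in this diff; judging from the comment above, microwave endpoint names appear to encode '<node_id>:<tp_id>' (e.g. '172.26.60.243:9'). A purely hypothetical sketch of that parsing job:

def split_node_tp(endpoint_name: str):
    # Split on the last ':' so dotted IPv4 node ids keep working.
    node_id, _, tp_id = endpoint_name.rpartition(':')
    if not node_id or not tp_id:
        raise ValueError('unexpected endpoint name: {:s}'.format(endpoint_name))
    return node_id, tp_id

print(split_node_tp('172.26.60.243:9'))  # ('172.26.60.243', '9')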
"iteration": 1664814762635, + "iteration": 1675103296430, "links": [], "liveNow": false, "panels": [ { "datasource": { "type": "postgres", - "uid": "monitoringdb" + "uid": "questdb" }, "fieldConfig": { "defaults": { @@ -162,14 +162,14 @@ { "datasource": { "type": "postgres", - "uid": "monitoringdb" + "uid": "questdb" }, "format": "time_series", "group": [], "hide": false, "metricColumn": "kpi_value", "rawQuery": true, - "rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_id, endpoint_id, kpi_sample_type\r\nFROM\r\n monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_id IN ($device_id) AND endpoint_id IN ($endpoint_id) AND kpi_sample_type IN ($kpi_sample_type)\r\nGROUP BY\r\n device_id, endpoint_id, kpi_sample_type\r\nORDER BY\r\n timestamp\r\n", + "rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n tfs_monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_name IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n timestamp", "refId": "A", "select": [ [ @@ -201,8 +201,8 @@ { "id": "renameByRegex", "options": { - "regex": "metric {device_id=\\\"([^\\\"]+)\\\", endpoint_id=\\\"([^\\\"]+)\\\", kpi_sample_type=\\\"([^\\\"]+)\\\"}", - "renamePattern": "$3 ($1 $2)" + "regex": "metric {device_name=\\\"([^\\\"]+)\\\", endpoint_name=\\\"([^\\\"]+)\\\", kpi_sample_type=\\\"([^\\\"]+)\\\"}", + "renamePattern": "$3 ($1 : $2)" } } ], @@ -227,16 +227,16 @@ }, "datasource": { "type": "postgres", - "uid": "monitoringdb" + "uid": "questdb" }, - "definition": "SELECT DISTINCT device_id FROM monitoring;", + "definition": "SELECT DISTINCT device_name FROM tfs_monitoring;", "hide": 0, "includeAll": true, "label": "Device", "multi": true, - "name": "device_id", + "name": "device_name", "options": [], - "query": "SELECT DISTINCT device_id FROM monitoring;", + "query": "SELECT DISTINCT device_name FROM tfs_monitoring;", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -245,22 +245,26 @@ }, { "current": { - "selected": false, - "text": "All", - "value": "$__all" + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] }, "datasource": { "type": "postgres", - "uid": "monitoringdb" + "uid": "questdb" }, - "definition": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})", + "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})", "hide": 0, "includeAll": true, "label": "EndPoint", "multi": true, - "name": "endpoint_id", + "name": "endpoint_name", "options": [], - "query": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})", + "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -271,26 +275,24 @@ "current": { "selected": true, "text": [ - "PACKETS_RECEIVED", - "PACKETS_TRANSMITTED" + "All" ], "value": [ - "PACKETS_RECEIVED", - "PACKETS_TRANSMITTED" + "$__all" ] }, "datasource": { "type": "postgres", - "uid": "monitoringdb" + "uid": "questdb" }, - "definition": "SELECT DISTINCT kpi_sample_type FROM monitoring;", + "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;", "hide": 0, "includeAll": true, "label": "Kpi Sample Type", "multi": true, "name": "kpi_sample_type", "options": [], - "query": "SELECT DISTINCT kpi_sample_type FROM monitoring;", + "query": "SELECT 
diff --git a/src/webui/grafana_dashboard_psql.json b/src/webui/grafana_dashboard_psql.json
index aa2676e26a0336c8279a658dbbdabaafa9c6b4d0..ec89c1647cc1086140b0bbd35354546c405ce910 100644
--- a/src/webui/grafana_dashboard_psql.json
+++ b/src/webui/grafana_dashboard_psql.json
@@ -26,14 +26,14 @@
     "editable": true,
     "fiscalYearStartMonth": 0,
     "graphTooltip": 0,
-    "iteration": 1664814762635,
+    "iteration": 1675103296430,
     "links": [],
     "liveNow": false,
     "panels": [
       {
         "datasource": {
           "type": "postgres",
-          "uid": "monitoringdb"
+          "uid": "questdb"
         },
         "fieldConfig": {
           "defaults": {
@@ -162,14 +162,14 @@
       {
         "datasource": {
           "type": "postgres",
-          "uid": "monitoringdb"
+          "uid": "questdb"
        },
         "format": "time_series",
         "group": [],
         "hide": false,
         "metricColumn": "kpi_value",
         "rawQuery": true,
-        "rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_id, endpoint_id, kpi_sample_type\r\nFROM\r\n monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_id IN ($device_id) AND endpoint_id IN ($endpoint_id) AND kpi_sample_type IN ($kpi_sample_type)\r\nGROUP BY\r\n device_id, endpoint_id, kpi_sample_type\r\nORDER BY\r\n timestamp\r\n",
+        "rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n tfs_monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_name IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n timestamp",
         "refId": "A",
         "select": [
           [
@@ -201,8 +201,8 @@
           {
             "id": "renameByRegex",
             "options": {
-              "regex": "metric {device_id=\\\"([^\\\"]+)\\\", endpoint_id=\\\"([^\\\"]+)\\\", kpi_sample_type=\\\"([^\\\"]+)\\\"}",
-              "renamePattern": "$3 ($1 $2)"
+              "regex": "metric {device_name=\\\"([^\\\"]+)\\\", endpoint_name=\\\"([^\\\"]+)\\\", kpi_sample_type=\\\"([^\\\"]+)\\\"}",
+              "renamePattern": "$3 ($1 : $2)"
             }
           }
         ],
@@ -227,16 +227,16 @@
       },
       "datasource": {
         "type": "postgres",
-        "uid": "monitoringdb"
+        "uid": "questdb"
       },
-      "definition": "SELECT DISTINCT device_id FROM monitoring;",
+      "definition": "SELECT DISTINCT device_name FROM tfs_monitoring;",
       "hide": 0,
       "includeAll": true,
       "label": "Device",
       "multi": true,
-      "name": "device_id",
+      "name": "device_name",
       "options": [],
-      "query": "SELECT DISTINCT device_id FROM monitoring;",
+      "query": "SELECT DISTINCT device_name FROM tfs_monitoring;",
       "refresh": 2,
       "regex": "",
       "skipUrlSync": false,
@@ -245,22 +245,26 @@
     },
     {
       "current": {
-        "selected": false,
-        "text": "All",
-        "value": "$__all"
+        "selected": true,
+        "text": [
+          "All"
+        ],
+        "value": [
+          "$__all"
+        ]
       },
       "datasource": {
         "type": "postgres",
-        "uid": "monitoringdb"
+        "uid": "questdb"
      },
-      "definition": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
+      "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})",
       "hide": 0,
       "includeAll": true,
       "label": "EndPoint",
       "multi": true,
-      "name": "endpoint_id",
+      "name": "endpoint_name",
       "options": [],
-      "query": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
+      "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})",
       "refresh": 2,
       "regex": "",
       "skipUrlSync": false,
@@ -271,26 +275,24 @@
       "current": {
         "selected": true,
         "text": [
-          "PACKETS_RECEIVED",
-          "PACKETS_TRANSMITTED"
+          "All"
         ],
         "value": [
-          "PACKETS_RECEIVED",
-          "PACKETS_TRANSMITTED"
+          "$__all"
         ]
       },
       "datasource": {
         "type": "postgres",
-        "uid": "monitoringdb"
+        "uid": "questdb"
       },
-      "definition": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
+      "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;",
       "hide": 0,
       "includeAll": true,
       "label": "Kpi Sample Type",
       "multi": true,
       "name": "kpi_sample_type",
       "options": [],
-      "query": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
+      "query": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;",
       "refresh": 2,
       "regex": "",
       "skipUrlSync": false,
@@ -300,14 +302,14 @@
     ]
   },
   "time": {
-    "from": "now-15m",
+    "from": "now-5m",
     "to": "now"
   },
   "timepicker": {},
-  "timezone": "",
+  "timezone": "utc",
   "title": "L3 Monitoring",
   "uid": "tf-l3-monit",
-  "version": 1,
+  "version": 6,
   "weekStart": ""
  }
 }
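The dashboard now reads from QuestDB through Grafana's postgres-type datasource, and the variables and queries switch from UUID columns to name columns on the tfs_monitoring table. The same queries can be sanity-checked outside Grafana over QuestDB's PostgreSQL wire protocol; a sketch assuming QuestDB's default PG port 8812 and the QDB_USERNAME/QDB_PASSWORD defaults from the deploy settings:

import psycopg2  # QuestDB speaks the PostgreSQL wire protocol

# Host, port, and credentials are assumptions: 8812 and admin/quest are
# QuestDB defaults, matching the deploy script's QDB_* settings.
conn = psycopg2.connect(host='localhost', port=8812, user='admin', password='quest', dbname='qdb')
with conn.cursor() as cur:
    cur.execute('SELECT DISTINCT device_name FROM tfs_monitoring;')
    for (device_name,) in cur.fetchall():
        print(device_name)
conn.close()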
diff --git a/src/webui/service/device/forms.py b/src/webui/service/device/forms.py
index cfa741ab306dd4a0c7ddc0272a3680891bfe1597..e0af206b2db910fe43951d0598fcadd857283b77 100644
--- a/src/webui/service/device/forms.py
+++ b/src/webui/service/device/forms.py
@@ -27,7 +27,7 @@ class AddDeviceForm(FlaskForm):
         # choices=[(-1, 'Select...'), (0, 'Undefined'), (1, 'Disabled'), (2, 'Enabled')],
         coerce=int,
         validators=[NumberRange(min=0)])
-    device_drivers_undefined = BooleanField('UNDEFINED')
+    device_drivers_undefined = BooleanField('UNDEFINED / EMULATED')
     device_drivers_openconfig = BooleanField('OPENCONFIG')
     device_drivers_transport_api = BooleanField('TRANSPORT_API')
     device_drivers_p4 = BooleanField('P4')
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index 140efe4b053d908c99e34551ddf6ba59d03b8e77..baff30348291724dad3f88557af9922741e688f1 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -12,14 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import json
 from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for
 from common.proto.context_pb2 import (
-    ConfigActionEnum, ConfigRule,
-    Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum,
-    Empty, TopologyId, ContextId)
+    ConfigActionEnum, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, Empty, TopologyId)
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
-from common.tools.context_queries.Device import add_device_to_topology
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from webui.service.device.forms import AddDeviceForm
@@ -62,7 +60,7 @@ def add():
 
     # listing enum values
     form.operational_status.choices = []
-    for key, value in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items():
+    for key, _ in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items():
         form.operational_status.choices.append(
             (DeviceOperationalStatusEnum.Value(key), key.replace('DEVICEOPERATIONALSTATUS_', '')))
 
@@ -71,58 +69,61 @@ def add():
         form.device_type.choices.append((device_type.value,device_type.value))
 
     if form.validate_on_submit():
-        device = Device()
+        device_obj = Device()
 
         # Device UUID:
-        device.device_id.device_uuid.uuid = form.device_id.data
+        device_obj.device_id.device_uuid.uuid = form.device_id.data
 
         # Device type:
-        device.device_type = str(form.device_type.data)
+        device_obj.device_type = str(form.device_type.data)
 
         # Device configurations:
-        config_rule = device.device_config.config_rules.add()
+        config_rule = device_obj.device_config.config_rules.add()
         config_rule.action = ConfigActionEnum.CONFIGACTION_SET
         config_rule.custom.resource_key = '_connect/address'
         config_rule.custom.resource_value = form.device_config_address.data
 
-        config_rule = device.device_config.config_rules.add()
+        config_rule = device_obj.device_config.config_rules.add()
         config_rule.action = ConfigActionEnum.CONFIGACTION_SET
         config_rule.custom.resource_key = '_connect/port'
         config_rule.custom.resource_value = form.device_config_port.data
 
-        config_rule = device.device_config.config_rules.add()
+        config_rule = device_obj.device_config.config_rules.add()
         config_rule.action = ConfigActionEnum.CONFIGACTION_SET
         config_rule.custom.resource_key = '_connect/settings'
-        config_rule.custom.resource_value = form.device_config_settings.data
+
+        try:
+            device_config_settings = json.loads(form.device_config_settings.data)
+        except: # pylint: disable=bare-except
+            device_config_settings = form.device_config_settings.data
+
+        if isinstance(device_config_settings, dict):
+            config_rule.custom.resource_value = json.dumps(device_config_settings)
+        else:
+            config_rule.custom.resource_value = str(device_config_settings)
 
         # Device status:
-        device.device_operational_status = form.operational_status.data
+        device_obj.device_operational_status = form.operational_status.data
 
         # Device drivers:
         if form.device_drivers_undefined.data:
-            device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED)
+            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED)
         if form.device_drivers_openconfig.data:
-            device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG)
+            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG)
         if form.device_drivers_transport_api.data:
-            device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API)
+            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API)
         if form.device_drivers_p4.data:
-            device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4)
+            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4)
         if form.device_drivers_ietf_network_topology.data:
-            device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY)
+            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY)
        if form.device_drivers_onf_tr_352.data:
-            device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352)
+            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352)
         if form.device_drivers_xr.data:
-            device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR)
+            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR)
 
         try:
             device_client.connect()
-            response: DeviceId = device_client.AddDevice(device)
+            response: DeviceId = device_client.AddDevice(device_obj)
             device_client.close()
-            context_uuid = session['context_uuid']
-            topology_uuid = session['topology_uuid']
-            context_client.connect()
-            context_id = ContextId(**json_context_id(context_uuid))
-            add_device_to_topology(context_client, context_id, topology_uuid, device.device_id.device_uuid.uuid)
-            context_client.close()
             flash(f'New device was created with ID "{response.device_uuid.uuid}".', 'success')
             return redirect(url_for('device.home'))
         except Exception as e: