diff --git a/src/bgpls_speaker/Dockerfile b/src/bgpls_speaker/Dockerfile index d90ebf82b0d7d1949471db51ca0e1dd5c4ed1e80..dec0c6590efd0e3aa032bcda0bda2e533c0dd0fd 100644 --- a/src/bgpls_speaker/Dockerfile +++ b/src/bgpls_speaker/Dockerfile @@ -16,56 +16,22 @@ # Maven install stage # # ---------------------------------------------- -FROM alpine/git:latest AS repo +# FROM alpine/git:latest AS repo -WORKDIR /usr/src/app -RUN git clone https://github.com/telefonicaid/netphony-network-protocols.git . +# WORKDIR /usr/src/app +# RUN git clone https://github.com/telefonicaid/netphony-network-protocols.git . FROM maven:3.8.8-eclipse-temurin-17 AS build -# RUN mkdir -p /var/teraflow/java_speaker/ -# WORKDIR /var/teraflow/java_speaker -# RUN mkdir -p protocols/ -# COPY ../netphony-network-protocols/src protocols/src -# COPY ../netphony-network-protocols/pom.xml protocols/pom.xml - -# RUN mvn dependency:resolve -# RUN mvn clean verify - -# WORKDIR /var/teraflow/java_speaker/protocols -# RUN mvn clean install -COPY --from=repo /usr/src/app/* /protocols/ - -WORKDIR /protocols/ - -# RUN mvn package -X - -# RUN mvn org.apache.maven.plugins:maven-install-plugin:2.5.2:install-file \ -# -Dfile=/protocols/target/network-protocols-1.1-SNAPSHOT.jar \ -# -DgroupId=es.tid.netphony \ -# -DartifactId=network-protocols \ -# -Dversion=1.1-SNAPSHOT \ -# -Dpackaging=jar \ -# -DlocalRepositoryPath=/ - -RUN mvn clean install -X - WORKDIR / -RUN mkdir -p /var/teraflow/bgpls -WORKDIR /var/teraflow/bgpls - - -# COPY . /var/teraflow/bgpls/ - -COPY src/bgpls_speaker/service/java/netphony-topology/ /var/teraflow/bgpls/service/java/netphony-topology/ -COPY src/bgpls_speaker/service/java/netphony-topology/pom.xml pom.xml - -WORKDIR /var/teraflow/bgpls/service/java/netphony-topology/ - -# RUN mvn clean compile -DskipTests -Dmaven.repo.local=$HOME/.m2/es/tid/netphony/network-protocols -# RUN mvn package -P bgp-ls-speaker assembly:single -DskipTests - -# ENTRYPOINT [ "ls","-R" ] +COPY src/bgpls_speaker/service/java/netphony-topology/ netphony-topology/ +COPY src/bgpls_speaker/service/java/netphony-topology/pom.xml netphony-topology/pom.xml + +WORKDIR /netphony-topology/ +RUN mvn clean compile -DskipTests -X +RUN mvn package assembly:single -P bgp-ls-speaker -DskipTests +WORKDIR /netphony-topology/target/ +# ENTRYPOINT [ "ls" ,"-a"] # ------------------------------------------- # jar created in /netphony-topology/target/bgp-ls-speaker-jar-with-dependencies.jar @@ -131,7 +97,7 @@ RUN python3 -m pip install -r requirements.txt WORKDIR /var/teraflow/bgpls_speaker RUN mkdir -p /java COPY src/bgpls_speaker/service/java/* /java/ -COPY --from=build /var/teraflow/bgpls/service/java/netphony-topology/target/bgp-ls-speaker-jar-with-dependencies.jar /var/teraflow/bgpls_speaker/bgp_ls.jar +COPY --from=build /netphony-topology/target/bgp-ls-speaker-jar-with-dependencies.jar /var/teraflow/bgpls_speaker/service/java/bgp_ls.jar # Add component files into working directory WORKDIR /var/teraflow diff --git a/src/bgpls_speaker/quick_deploy.sh b/src/bgpls_speaker/quick_deploy.sh new file mode 100644 index 0000000000000000000000000000000000000000..7276f3287d6523d35ce99362963c6f45dfde911c --- /dev/null +++ b/src/bgpls_speaker/quick_deploy.sh @@ -0,0 +1,438 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# If not already set, set the URL of the Docker registry where the images will be uploaded to. +# By default, assume internal MicroK8s registry is used. +export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} + +# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy. +# By default, only basic components are deployed +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator bgpls_speaker"} + +# If not already set, set the tag you want to use for your images. +export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} + +# If not already set, set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +# If not already set, set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""} + +# If not already set, set the new Grafana admin password +export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} + +# If not already set, disable skip-build flag to rebuild the Docker images. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-"YES"} + +# If TFS_SKIP_BUILD is "YES", select the containers to be build +# Any other container will use previous docker images +export TFS_QUICK_COMPONENTS="bgpls_speaker" + +# ----- CockroachDB ------------------------------------------------------------ + +# If not already set, set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"} + +# If not already set, set the database username to be used by Context. +export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"} + +# If not already set, set the database user's password to be used by Context. +export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} + +# If not already set, set the database name to be used by Context. +export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} + + +# ----- NATS ------------------------------------------------------------------- + +# If not already set, set the namespace where NATS will be deployed. +export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} + + +# ----- QuestDB ---------------------------------------------------------------- + +# If not already set, set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"} + +# If not already set, set the database username to be used for QuestDB. +export QDB_USERNAME=${QDB_USERNAME:-"admin"} + +# If not already set, set the database user's password to be used for QuestDB. +export QDB_PASSWORD=${QDB_PASSWORD:-"quest"} + +# If not already set, set the table name to be used by Monitoring for KPIs. 
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} + +# If not already set, set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"} + + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +# Constants +GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller" +TMP_FOLDER="./tmp" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" +mkdir -p $TMP_MANIFESTS_FOLDER +TMP_LOGS_FOLDER="$TMP_FOLDER/logs" +mkdir -p $TMP_LOGS_FOLDER + +echo "Deleting and Creating a new namespace..." +kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found +kubectl create namespace $TFS_K8S_NAMESPACE +printf "\n" + +echo "Create secret with CockroachDB data" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with NATS data" +NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') +kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \ + --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT} +printf "\n" + +echo "Create secret with QuestDB data" +QDB_HTTP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') +QDB_ILP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="ilp")].port}') +QDB_SQL_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +METRICSDB_HOSTNAME="questdb-public.${QDB_NAMESPACE}.svc.cluster.local" +kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=QDB_NAMESPACE=${QDB_NAMESPACE} \ + --from-literal=METRICSDB_HOSTNAME=${METRICSDB_HOSTNAME} \ + --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \ + --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \ + --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \ + --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \ + --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \ + --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \ + --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD} +printf "\n" + +echo "Deploying components and collecting environment variables..." +ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh +echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT +PYTHONPATH=$(pwd)/src +echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT + +for COMPONENT in $TFS_COMPONENTS; do + echo "Processing '$COMPONENT' component..." + + if [ "$TFS_SKIP_BUILD" != "YES" ]; then + echo " Building Docker image..." 
+ BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + elif [ "$COMPONENT" == "pathcomp" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" + docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" + docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" + docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" + docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" + else + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" + fi + + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." + + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" + docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" + docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi + else + for QUICK_COMPONENT in $TFS_QUICK_COMPONENTS; do + if [ "$COMPONENT" == "$QUICK_COMPONENT" ]; then + + echo " Building Docker image..." 
+ BUILD_LOG="$TMP_LOGS_FOLDER/build_${QUICK_COMPONENT}.log" + + docker build -t "$QUICK_COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$QUICK_COMPONENT"/Dockerfile . > "$BUILD_LOG" + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." + + + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$QUICK_COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${QUICK_COMPONENT}.log" + docker tag "$QUICK_COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${QUICK_COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi + done + fi + + echo " Adapting '$COMPONENT' manifest file..." + MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml" + cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" + + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + fi + + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" + + # TODO: harmonize names of the monitoring component + + echo " Deploying '$COMPONENT' component to Kubernetes..." + DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log" + kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG" + COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") + #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + + echo " Collecting env-vars for '$COMPONENT' component..." 
+ + SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME}service --namespace $TFS_K8S_NAMESPACE -o json) + if [ -z "${SERVICE_DATA}" ]; then continue; fi + + # Env vars for service's host address + SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP') + if [ -z "${SERVICE_HOST}" ]; then continue; fi + ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT + + # Env vars for service's 'grpc' port (if any) + SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port') + if [ -n "${SERVICE_PORT_GRPC}" ]; then + ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT + fi + + # Env vars for service's 'http' port (if any) + SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port') + if [ -n "${SERVICE_PORT_HTTP}" ]; then + ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT + fi + + printf "\n" +done + +echo "Deploying extra manifests..." +for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do + echo "Processing manifest '$EXTRA_MANIFEST'..." + if [[ "$EXTRA_MANIFEST" == *"servicemonitor"* ]]; then + kubectl apply -f $EXTRA_MANIFEST + else + kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST + fi + printf "\n" +done +printf "\n" + +for COMPONENT in $TFS_COMPONENTS; do + echo "Waiting for '$COMPONENT' component..." + COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") + kubectl wait --namespace $TFS_K8S_NAMESPACE \ + --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service + printf "\n" +done + +if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then + echo "Configuring WebUI DataStores and Dashboards..." + sleep 5 + + # Exposed through the ingress controller "tfs-ingress" + GRAFANA_URL="127.0.0.1:80/grafana" + + # Default Grafana credentials + GRAFANA_USERNAME="admin" + GRAFANA_PASSWORD="admin" + + # Configure Grafana Admin Password + # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password + GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}" + + echo ">> Updating Grafana 'admin' password..." + curl -X PUT -H "Content-Type: application/json" -d '{ + "oldPassword": "'${GRAFANA_PASSWORD}'", + "newPassword": "'${TFS_GRAFANA_PASSWORD}'", + "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'" + }' ${GRAFANA_URL_DEFAULT}/api/user/password + echo + echo + + # Updated Grafana API URL + GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}" + echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT + + echo ">> Installing Scatter Plot plugin..." + curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \ + ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install + echo + + # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/ + QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}" + echo ">> Creating datasources..." 
+ curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{ + "access" : "proxy", + "type" : "postgres", + "name" : "questdb-mon-kpi", + "url" : "'${QDB_HOST_PORT}'", + "database" : "'${QDB_TABLE_MONITORING_KPIS}'", + "user" : "'${QDB_USERNAME}'", + "basicAuth": false, + "isDefault": true, + "jsonData" : { + "sslmode" : "disable", + "postgresVersion" : 1100, + "maxOpenConns" : 0, + "maxIdleConns" : 2, + "connMaxLifetime" : 14400, + "tlsAuth" : false, + "tlsAuthWithCACert" : false, + "timescaledb" : false, + "tlsConfigurationMethod": "file-path", + "tlsSkipVerify" : true + }, + "secureJsonData": {"password": "'${QDB_PASSWORD}'"} + }' ${GRAFANA_URL_UPDATED}/api/datasources + echo + + curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{ + "access" : "proxy", + "type" : "postgres", + "name" : "questdb-slc-grp", + "url" : "'${QDB_HOST_PORT}'", + "database" : "'${QDB_TABLE_SLICE_GROUPS}'", + "user" : "'${QDB_USERNAME}'", + "basicAuth": false, + "isDefault": false, + "jsonData" : { + "sslmode" : "disable", + "postgresVersion" : 1100, + "maxOpenConns" : 0, + "maxIdleConns" : 2, + "connMaxLifetime" : 14400, + "tlsAuth" : false, + "tlsAuthWithCACert" : false, + "timescaledb" : false, + "tlsConfigurationMethod": "file-path", + "tlsSkipVerify" : true + }, + "secureJsonData": {"password": "'${QDB_PASSWORD}'"} + }' ${GRAFANA_URL_UPDATED}/api/datasources + printf "\n\n" + + echo ">> Creating dashboards..." + # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/ + curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \ + ${GRAFANA_URL_UPDATED}/api/dashboards/db + echo + + curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \ + ${GRAFANA_URL_UPDATED}/api/dashboards/db + printf "\n\n" + + echo ">> Staring dashboards..." 
+ DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit" + DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') + curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} + echo + + DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps" + DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') + curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} + echo + + printf "\n\n" +fi diff --git a/src/bgpls_speaker/service/BgplsServiceServicerImpl.py b/src/bgpls_speaker/service/BgplsServiceServicerImpl.py index 5e61afafbf2e574d303fcb2022d346808461fb2c..d5db1bfec4bf8cb5cf017a33fe499d68a514a5a8 100644 --- a/src/bgpls_speaker/service/BgplsServiceServicerImpl.py +++ b/src/bgpls_speaker/service/BgplsServiceServicerImpl.py @@ -61,8 +61,8 @@ class BgplsServiceServicerImpl(BgplsServiceServicer): links = self.discoveredDB.GetLinksFromDiscoveredDB() links_info=[] for link in links: - local=NodeDescriptors(bgplsID=link.local.bgpls_id,nodeName=link.local.node_name) - remote=NodeDescriptors(bgplsID=link.remote.bgpls_id,nodeName=link.remote.node_name) + local=NodeDescriptors(igp_id=link.local_id,nodeName=link.local_id) + remote=NodeDescriptors(igp_id=link.remote_id,nodeName=link.remote_id) links_info.append(DiscoveredLink(local=local,remote=remote,learntFrom=link.learnt_from)) return DiscoveredLinkList(discoveredlinks=links_info) diff --git a/src/bgpls_speaker/service/java/netphony-topology/pom.xml b/src/bgpls_speaker/service/java/netphony-topology/pom.xml index 0b991f2afb6c4db51887dba52d61e36f22b29083..be949361d720695e225e4c9fc08c9730f453fd8a 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/pom.xml +++ b/src/bgpls_speaker/service/java/netphony-topology/pom.xml @@ -1,17 +1,3 @@ -<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. --> - <?xml version="1.0" encoding="UTF-8"?> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> @@ -27,6 +13,19 @@ limitations under the License. --> <license> <name>Apache License 2.0</name> <url>http://www.apache.org/licenses/LICENSE-2.0</url> + <!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) --> + + <!-- Licensed under the Apache License, Version 2.0 (the "License"); --> + <!-- you may not use this file except in compliance with the License. --> + <!-- You may obtain a copy of the License at --> + + <!-- http://www.apache.org/licenses/LICENSE-2.0 --> + + <!-- Unless required by applicable law or agreed to in writing, software --> + <!-- distributed under the License is distributed on an "AS IS" BASIS, --> + <!-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --> + <!-- See the License for the specific language governing permissions and --> + <!-- limitations under the License. 
--> </license> </licenses> <dependencies> @@ -51,7 +50,7 @@ limitations under the License. --> <dependency> <groupId>es.tid.netphony</groupId> <artifactId>network-protocols</artifactId> - <version>1.1-SNAPSHOT</version> + <version>1.4.1</version> <!-- Next local repo only on docker container --> <!-- <scope>system</scope> <systemPath>/protocols/target/network-protocols-1.1-SNAPSHOT.jar</systemPath> --> diff --git a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/grpc/grpcClient.java b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/grpc/grpcClient.java index a7653783489ae8a0c67a78f9adbd896bbb49a342..059f8c6789a8600f5087ad13886c623ee39a2396 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/grpc/grpcClient.java +++ b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/grpc/grpcClient.java @@ -137,6 +137,7 @@ public class grpcClient { setAsPathSegment(Integer.toString(update.getAsPathSegment())). addAllLink(l).build(); }else if(nodes.size()>0&& l.size()==0) { + logger.debug("ADDING NODE"); request=updateRequest.newBuilder(). setNextHop(update.getNextHop().toString()). setAddressFamilyID(Integer.toString(update.getAFI())). @@ -145,6 +146,7 @@ public class grpcClient { }else { //Error if node name is null // TODO: handle seng grpc error? + logger.debug("ADDING NODE AND LINK"); request=updateRequest.newBuilder(). setNextHop("-"+update.getNextHop().toString()). setAddressFamilyID(Integer.toString(update.getAFI())). diff --git a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/LinkNLRIMsg.java b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/LinkNLRIMsg.java index e2a5a660cfedfdeacf61edaee19148b6bcafb304..9455ccd1cb9e55ecb85925db19bf62f97cf12703 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/LinkNLRIMsg.java +++ b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/LinkNLRIMsg.java @@ -1,3 +1,4 @@ + // Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) // Licensed under the Apache License, Version 2.0 (the "License"); @@ -140,10 +141,10 @@ public class LinkNLRIMsg { IGP_type = linkNLRI.getRemoteNodeDescriptorsTLV().getIGPRouterID().getIGP_router_id_type(); switch (IGP_type) { case 1: - remoteIGPID = Integer.toString(linkNLRI.getRemoteNodeDescriptorsTLV().getIGPRouterID().getISIS_ISO_NODE_ID()); + remoteBgplsID = Integer.toString(linkNLRI.getRemoteNodeDescriptorsTLV().getIGPRouterID().getISIS_ISO_NODE_ID()); break; case 2: - remoteIGPID = Integer.toString(linkNLRI.getRemoteNodeDescriptorsTLV().getIGPRouterID().getISIS_ISO_NODE_ID()); + remoteBgplsID = Integer.toString(linkNLRI.getRemoteNodeDescriptorsTLV().getIGPRouterID().getISIS_ISO_NODE_ID()); case 3: remoteIGPID = linkNLRI.getRemoteNodeDescriptorsTLV().getIGPRouterID().getIpv4AddressOSPF().toString(); break; @@ -204,13 +205,6 @@ public class LinkNLRIMsg { } - public String localIGPID() { - return localIGPID; - } - public String remoteIGPID() { - return remoteIGPID; - } - public String getLearntFrom() { return learntFrom; } diff --git a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/UpdateMsg.java b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/UpdateMsg.java index 
3b837ed929048505628a0936105ac91b115603a4..37d87e6836734989ec38d08741f51495262c7d3b 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/UpdateMsg.java +++ b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/models/UpdateMsg.java @@ -29,11 +29,17 @@ public class UpdateMsg { private LinkNLRIMsg link; private PathAttributeMsg path; private List <LinkNLRIMsg> linkList = new ArrayList<>(); + private List <NodeNLRIMsg> nodeList = new ArrayList<>(); public List <LinkNLRIMsg> getLinkList(){ return this.linkList; } - + public List <NodeNLRIMsg> getNodeList(){ + return this.nodeList; + } + public void addNode(NodeNLRIMsg node) { + this.nodeList.add(node); + } public int getAFI() { return AFI; } @@ -70,6 +76,9 @@ public class UpdateMsg { public boolean linkCheck(){ return linkList.size()>0; } + public boolean nodeCheck(){ + return nodeList.size()>0; + } public void setLink(LinkNLRIMsg link) { this.link = link; } diff --git a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/updateTEDB/UpdateProccesorThread.java b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/updateTEDB/UpdateProccesorThread.java index b82d122df0002cd8122af534b56dbd61214747b0..cedbe3498ddd1534452545f8a10ede59c37d3a71 100644 --- a/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/updateTEDB/UpdateProccesorThread.java +++ b/src/bgpls_speaker/service/java/netphony-topology/src/main/java/eu/teraflow/tid/bgp4Peer/updateTEDB/UpdateProccesorThread.java @@ -329,7 +329,7 @@ public class UpdateProccesorThread extends Thread { NodeNLRIMsg NnlriMsg = new NodeNLRIMsg((NodeNLRI)linkstateNLRI,learntFrom,currentName); log.info("Node info: " + NnlriMsg.toString()); updateMsgList.addNodeToJson(NnlriMsg,currentName); - update.setNode(NnlriMsg);//set for grpc msg + update.addNode(NnlriMsg);//set for grpc msg } currentName=null; continue; diff --git a/src/bgpls_speaker/service/resources/BGP4Parameters_3.xml b/src/bgpls_speaker/service/resources/BGP4Parameters_3.xml deleted file mode 100644 index e706cd34f01b02ed362cae571c8a60176f477bfc..0000000000000000000000000000000000000000 --- a/src/bgpls_speaker/service/resources/BGP4Parameters_3.xml +++ /dev/null @@ -1,65 +0,0 @@ - <!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. --> - -<config> - <!-- TCP port where the BGP is listening for incoming bgp4 connections. Optional Parameter. Default value: 179 (BGP Port) --> - <BGP4Port>12179</BGP4Port> - <BGPIdentifier>1.1.1.1</BGPIdentifier> -<!-- TCP port to connect to manage the BGP connection. 
Default value: 1112 --> - <BGP4ManagementPort>1112</BGP4ManagementPort> - <!-- Peers to which this Peer is going to establish connection --> - <configPeer> - <peer>10.95.86.214</peer> - <export>false</export> - <import>true</import> - <peerPort>179</peerPort> - </configPeer> - - <!-- Ficheros log (servidor, protocolo PCEP y OSPF). Campos opcionales--> - <BGP4LogFile>BGP4Parser2.log</BGP4LogFile><!-- Default value: BGP4Parser.log --> - <BGP4LogFileClient>BGP4Client2.log</BGP4LogFileClient><!-- Default value: BGP4Client.log--> - <BGP4LogFileServer>BGP4Server2.log</BGP4LogFileServer><!-- Default value: BGP4Server.log--> - <!-- If the tcp no delay option is used or not. Optional Parameter. Default value: false. --> - <nodelay>true</nodelay> - <!-- Waiting Time to re-connect to clients. Default value: 6000 ms. --> - <delay>40000</delay> - <setTraces>true</setTraces> - <!-- OPEN Parameters --> - <!-- RFC 4271. This 2-octet unsigned integer indicates the number of seconds the sender proposes for the value of the Hold Timer. - Upon receipt of an OPEN message, a BGP speaker MUST calculate the value of the Hold Timer by using the smaller of its configured - Hold Time and the Hold Time received in the OPEN message. The Hold Time MUST be either zero or at least three seconds. An - implementation MAY reject connections on the basis of the Hold Time. The calculated value indicates the maximum number of - seconds that may elapse between the receipt of successive KEEPALIVE and/or UPDATE messages from the sender. --> - <holdTime>180</holdTime><!-- Optional Parameter. Default value: 3. --> - <!-- RFC 4271. This 1-octet unsigned integer indicates the protocol version number of the message. The current BGP version number is 4. --> - <version>4</version><!-- Optional Parameter. Default value: 4. --> - <!-- RFC 4271. This 2-octet unsigned integer indicates the Autonomous System number of the sender.--> - <myAutonomousSystem>65006</myAutonomousSystem> - <!-- RFC 4271. This 4-octet unsigned integer indicates the BGP Identifier of the sender. A given BGP speaker sets the value of its BGP - Identifier to an IP address that is assigned to that BGP speaker. The value of the BGP Identifier is determined upon - startup and is the same for every local interface and BGP peer. --> - <!--<BGPIdentifier>192.168.1.200</BGPIdentifier> --> - <!-- If the peer is in charge of sending its topology (only the interdomain Links) to the other BGP peer it is connected to. Default: false --> - <sendTopology>false</sendTopology> - <!-- If the peer is in charge of sending its whole topology to the other BGP peer it is connected to. Default: false --> - <sendIntradomainLinks>true</sendIntradomainLinks> - <!-- Optional Parameter. How to learn the topology. Possibilities: fromXML, fromBGP. Default: fromBGP --> - <learnTopology>fromBGP</learnTopology> - <!-- Topology network to read. It is mandatory if and only if learnTopology parameter is fromXML. --> - <!--<topologyFile>src/test/resources/network1.xml</topologyFile>--> - <!-- Optional Parameter. Instance Identifier for node and link NLRI. See rfc 6549. Default value: 0--> - <!--<instanceID>0</instanceID>--> - <!-- Optional Parameter. 
Default value: localhost --> - <localBGPAddress>0.0.0.0</localBGPAddress> -</config> \ No newline at end of file diff --git a/src/bgpls_speaker/service/resources/bgp_ls.jar b/src/bgpls_speaker/service/resources/bgp_ls.jar deleted file mode 100644 index 06000cf98a1b592e80fd96eee059d4200301f352..0000000000000000000000000000000000000000 Binary files a/src/bgpls_speaker/service/resources/bgp_ls.jar and /dev/null differ diff --git a/src/bgpls_speaker/service/resources/interfaces.json b/src/bgpls_speaker/service/resources/interfaces.json deleted file mode 100644 index 103d1518b11258f4a92d8774f78763fdedcb030e..0000000000000000000000000000000000000000 --- a/src/bgpls_speaker/service/resources/interfaces.json +++ /dev/null @@ -1,50 +0,0 @@ - -{ - "devices":[ - {"name":"HL2-1-1","IP-Addr":"10.95.90.76", - "interfaces":{ - "99.1.2.1":"GigabitEthernet0/0/0/0", - "99.1.3.1":"GigabitEthernet0/0/0/1", - "99.1.11.1":"GigabitEthernet0/0/0/2"}, - "links":{ - "GigabitEthernet0/0/0/0":"HL2-2-1", - "GigabitEthernet0/0/0/1":"HL2-3-1", - "GigabitEthernet0/0/0/2":"HL2-1-2" - } - }, - {"name":"HL2-3-1","IP-Addr":"10.95.90.78", - "interfaces":{ - "99.3.4.3":"GigabitEthernet0/0/0/0", - "99.1.3.3":"GigabitEthernet0/0/0/1", - "99.3.33.3":"GigabitEthernet0/0/0/2", - "99.3.52.2":"GigabitEthernet0/0/0/3"}, - "links":{ - "GigabitEthernet0/0/0/0":"HL2-4-1", - "GigabitEthernet0/0/0/1":"HL2-1-1", - "GigabitEthernet0/0/0/2":"HL2-3-2", - "GigabitEthernet0/0/0/3":"HL3-1-1" - }}, - {"name":"HL2-2-2","IP-Addr":"10.95.90.81", - "interfaces":{ - "99.1.12.2":"ge-0/0/0", - "99.2.24.2":"ge-0/0/1", - "99.2.22.22":"ge-0/0/2", - "99.2.99.2":"ge-0/0/9"}, - "links":{ - "ge-0/0/0":"HL2-1-2", - "ge-0/0/1":"HL2-4-2", - "ge-0/0/2":"HL2-2-1", - "ge-0/0/9":"HL2-4-2_OSPF" - }}, - {"name":"HL2-2-1","IP-Addr":"10.95.90.77", - "interfaces":{ - "99.1.2.2":"Gi0/0/0/0", - "99.2.4.2":"Gi0/0/0/1", - "99.2.22.2":"Gi0/0/0/2"}, - "links":{ - "Gi0/0/0/0":"HL2-1-1", - "Gi0/0/0/1":"HL2-4-1", - "Gi0/0/0/2":"HL2-2-2" - }} - ] -} \ No newline at end of file diff --git a/src/bgpls_speaker/service/tools/DiscoveredDBManager.py b/src/bgpls_speaker/service/tools/DiscoveredDBManager.py index a581adace375053e4cad06b9d98b42510506db4b..b7f8b0ba775b56fa970f91467bb9ebd2515af209 100644 --- a/src/bgpls_speaker/service/tools/DiscoveredDBManager.py +++ b/src/bgpls_speaker/service/tools/DiscoveredDBManager.py @@ -51,6 +51,8 @@ class DiscoveredDBManager: # is a link # Compare and update self.discoveredDB.append(update_request) + LOGGER.debug("(AddToDB) Actual DB: ") + LOGGER.debug("%s", [up.toString() for up in self.discoveredDB]) return True def GetDiscoveredDB(self): @@ -127,16 +129,16 @@ class DiscoveredDBManager: """ for update in self.discoveredDB: for link in update.links: - if(self.GetNodeNameFromLinkId(link.local.bgpls_id) is not None): - LOGGER.info("(UpdateNodeNameInLink) local %s: %s",link.local.bgpls_id, self.GetNodeNameFromLinkId(link.local.bgpls_id)) - link.local.node_name=self.GetNodeNameFromLinkId(link.local.bgpls_id) + if(self.GetNodeNameFromLinkId(link.localID) is not None): + LOGGER.info("(UpdateNodeNameInLink) local %s: %s",link.localID, self.GetNodeNameFromLinkId(link.localID)) + link.local.node_name=self.GetNodeNameFromLinkId(link.localID) else: - link.local.node_name=link.local.bgpls_id - if(self.GetNodeNameFromLinkId(link.remote.bgpls_id) is not None): - LOGGER.info("(UpdateNodeNameInLink) remote %s: %s",link.remote.bgpls_id, self.GetNodeNameFromLinkId(link.remote.bgpls_id)) - link.remote.node_name=self.GetNodeNameFromLinkId(link.remote.bgpls_id) + 
link.local.node_name=link.localID + if(self.GetNodeNameFromLinkId(link.remoteID) is not None): + LOGGER.info("(UpdateNodeNameInLink) remote %s: %s",link.remoteID, self.GetNodeNameFromLinkId(link.remoteID)) + link.remote.node_name=self.GetNodeNameFromLinkId(link.remoteID) else: - link.remote.node_name=link.remote.bgpls_id + link.remote.node_name=link.remoteID return True def RemoveLinkFromDB(self):
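
The updated Dockerfile builds the BGP-LS speaker jar directly from the vendored netphony-topology sources and takes es.tid.netphony:network-protocols 1.4.1 as a regular Maven dependency instead of compiling it from a cloned repository. The same build can be reproduced outside Docker; a minimal sketch, assuming Maven 3.8+ with JDK 17 and that the 1.4.1 artifact is resolvable from the configured Maven repositories:

    # Reproduce the Maven stage of the Dockerfile locally (paths as in the diff above).
    cd src/bgpls_speaker/service/java/netphony-topology

    # Compile, then assemble the self-contained speaker jar using the bgp-ls-speaker profile.
    mvn clean compile -DskipTests
    mvn package assembly:single -P bgp-ls-speaker -DskipTests

    # This is the artifact the final image copies in as bgp_ls.jar.
    ls target/bgp-ls-speaker-jar-with-dependencies.jar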
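
The final image stage copies that jar to /var/teraflow/bgpls_speaker/service/java/bgp_ls.jar, which is where the Python component expects it. A quick way to verify the copy after building the image, assuming the final base image ships a usable ls (the image tag bgpls_speaker:dev is only an example; run from the repository root):

    # Build the component image with the same invocation quick_deploy.sh uses.
    docker build -t bgpls_speaker:dev -f ./src/bgpls_speaker/Dockerfile .

    # List the target directory inside the image to confirm bgp_ls.jar is present.
    docker run --rm --entrypoint ls bgpls_speaker:dev -l /var/teraflow/bgpls_speaker/service/java/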
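
quick_deploy.sh is meant for fast redeploys: with TFS_SKIP_BUILD="YES" (its default) only the images named in TFS_QUICK_COMPONENTS (hard-coded to bgpls_speaker) are rebuilt and pushed, while every other component listed in TFS_COMPONENTS is redeployed from images already in the registry. A usage sketch, assuming it is run from the repository root with the MicroK8s registry and the CockroachDB, NATS and QuestDB namespaces already deployed (the script reads their services to create the secrets):

    # Optional overrides; unset variables fall back to the defaults at the top of the script.
    export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
    export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator bgpls_speaker"
    export TFS_IMAGE_TAG="dev"
    export TFS_K8S_NAMESPACE="tfs"

    # Rebuild only bgpls_speaker; redeploy everything else from existing images.
    ./src/bgpls_speaker/quick_deploy.sh

    # Load the per-service host/port variables the script collects.
    source tfs_runtime_env_vars.sh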