From 871ef3931c74e9b9fdf213b2391be74c39002341 Mon Sep 17 00:00:00 2001 From: armingol Date: Mon, 7 Jul 2025 11:33:48 +0200 Subject: [PATCH] New service to configure interfaces --- my_deploy.sh | 4 +- proto/context.proto | 15 +- proto/set_interface.proto | 22 + quick_deploy.sh | 438 ++++++++++++++++++ src/common/tools/object_factory/Service.py | 10 + src/common/type_checkers/Assertions.py | 2 + src/context/service/database/ConfigRule.py | 3 + src/context/service/database/Service.py | 2 + .../database/models/ConfigRuleModel.py | 1 + .../database/models/enums/ServiceType.py | 1 + src/device/service/Tools.py | 40 +- .../drivers/openconfig/OpenConfigDriver.py | 27 +- .../SET_INTERFACE_multivendor.py | 59 +++ .../drivers/openconfig/templates/Tools.py | 24 +- .../drivers/openconfig/templates/__init__.py | 135 +++++- .../frontend/service/algorithms/_Algorithm.py | 4 +- .../algorithms/tools/ComposeConfigRules.py | 125 +++-- .../service/algorithms/tools/ServiceTypes.py | 3 +- .../java/org/etsi/tfs/policy/Serializer.java | 4 + .../service_handler_api/FilterFields.py | 1 + .../service_handler_api/SettingsHandler.py | 29 ++ .../service/service_handler_api/Tools.py | 8 +- .../service/service_handlers/__init__.py | 7 + .../set_interface/ConfigRules.py | 59 +++ .../Set_InterfaceServiceHandler.py | 179 +++++++ .../set_interface/__init__.py | 13 + 26 files changed, 1111 insertions(+), 104 deletions(-) create mode 100644 proto/set_interface.proto create mode 100755 quick_deploy.sh create mode 100755 src/device/service/drivers/openconfig/templates/SET_INTERFACE/SET_INTERFACE_multivendor.py create mode 100644 src/service/service/service_handlers/set_interface/ConfigRules.py create mode 100644 src/service/service/service_handlers/set_interface/Set_InterfaceServiceHandler.py create mode 100644 src/service/service/service_handlers/set_interface/__init__.py diff --git a/my_deploy.sh b/my_deploy.sh index 4d3820f41..34dd137b8 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -134,7 +134,7 @@ export CRDB_PASSWORD="tfs123" export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="" +export CRDB_DROP_DATABASE_IF_EXISTS="YES" # Disable flag for re-deploying CockroachDB from scratch. export CRDB_REDEPLOY="" @@ -186,7 +186,7 @@ export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" # Disable flag for dropping tables if they exist. -export QDB_DROP_TABLES_IF_EXIST="" +export QDB_DROP_TABLES_IF_EXIST="YES" # Disable flag for re-deploying QuestDB from scratch. 
export QDB_REDEPLOY="" diff --git a/proto/context.proto b/proto/context.proto index b33750e80..618a0cea9 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -20,6 +20,7 @@ import "google/protobuf/any.proto"; import "acl.proto"; import "kpi_sample_types.proto"; +import "set_interface.proto"; service ContextService { rpc ListContextIds (Empty ) returns ( ContextIdList ) {} @@ -78,7 +79,7 @@ service ContextService { rpc RemoveConnection (ConnectionId ) returns ( Empty ) {} rpc GetConnectionEvents(Empty ) returns (stream ConnectionEvent ) {} - + // ------------------------------ Experimental ----------------------------- rpc GetOpticalConfig (Empty ) returns (OpticalConfigList) {} rpc SetOpticalConfig (OpticalConfig ) returns (OpticalConfigId ) {} @@ -203,7 +204,7 @@ message Component { // Defined previously in this sectio Uuid component_uuid = 1; string name = 2; string type = 3; - + map attributes = 4; // dict[attr.name => json.dumps(attr.value)] string parent = 5; } @@ -331,6 +332,7 @@ enum ServiceTypeEnum { SERVICETYPE_L1NM = 8; SERVICETYPE_INT = 9; SERVICETYPE_ACL = 10; + SERVICETYPE_SET_INTERFACE = 11; } enum ServiceStatusEnum { @@ -544,11 +546,17 @@ message ConfigRule_ACL { acl.AclRuleSet rule_set = 2; } +message ConfigRule_SET_INTERFACE { + EndPointId endpoint_id = 1; + set_interface.SetInterfaceRuleSet rule_set = 2; +} + message ConfigRule { ConfigActionEnum action = 1; oneof config_rule { ConfigRule_Custom custom = 2; ConfigRule_ACL acl = 3; + ConfigRule_SET_INTERFACE set_interface = 4; } } @@ -579,7 +587,6 @@ message Location { oneof location { string region = 1; GPS_Position gps_position = 2; - string interface=3; string circuit_pack=4; } @@ -711,7 +718,7 @@ message OpticalLinkDetails { string dst_port = 3; string local_peer_port = 4; string remote_peer_port = 5 ; - bool used = 6 ; + bool used = 6 ; map c_slots = 7; map l_slots = 8; map s_slots = 9; diff --git a/proto/set_interface.proto b/proto/set_interface.proto new file mode 100644 index 000000000..e37194cf9 --- /dev/null +++ b/proto/set_interface.proto @@ -0,0 +1,22 @@ +// Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package set_interface; + +message SetInterfaceRuleSet { + string ip = 1; + string mask = 3; + string vlan = 4; +} \ No newline at end of file diff --git a/quick_deploy.sh b/quick_deploy.sh new file mode 100755 index 000000000..dcd7d3275 --- /dev/null +++ b/quick_deploy.sh @@ -0,0 +1,438 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# If not already set, set the URL of the Docker registry where the images will be uploaded to.
+# By default, assume internal MicroK8s registry is used.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
+
+# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
+# By default, only basic components are deployed
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui"}
+
+# If not already set, set the tag you want to use for your images.
+export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
+
+# If not already set, set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+# If not already set, set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
+
+# If not already set, set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
+
+# If not already set, disable skip-build flag to rebuild the Docker images.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-"YES"}
+
+# If TFS_SKIP_BUILD is "YES", select the containers to be built anyway.
+# Any other container will reuse its previously built Docker image.
+export TFS_QUICK_COMPONENTS="service"
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# If not already set, set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
+
+# If not already set, set the database username to be used by Context.
+export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
+
+# If not already set, set the database user's password to be used by Context.
+export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
+
+# If not already set, set the database name to be used by Context.
+export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# If not already set, set the namespace where NATS will be deployed.
+export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# If not already set, set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
+
+# If not already set, set the database username to be used for QuestDB.
+export QDB_USERNAME=${QDB_USERNAME:-"admin"}
+
+# If not already set, set the database user's password to be used for QuestDB.
+export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
+
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} + +# If not already set, set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"} + + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +# Constants +GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller" +TMP_FOLDER="./tmp" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" +mkdir -p $TMP_MANIFESTS_FOLDER +TMP_LOGS_FOLDER="$TMP_FOLDER/logs" +mkdir -p $TMP_LOGS_FOLDER + +echo "Deleting and Creating a new namespace..." +kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found +kubectl create namespace $TFS_K8S_NAMESPACE +printf "\n" + +echo "Create secret with CockroachDB data" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with NATS data" +NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') +kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \ + --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT} +printf "\n" + +echo "Create secret with QuestDB data" +QDB_HTTP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') +QDB_ILP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="ilp")].port}') +QDB_SQL_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +METRICSDB_HOSTNAME="questdb-public.${QDB_NAMESPACE}.svc.cluster.local" +kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=QDB_NAMESPACE=${QDB_NAMESPACE} \ + --from-literal=METRICSDB_HOSTNAME=${METRICSDB_HOSTNAME} \ + --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \ + --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \ + --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \ + --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \ + --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \ + --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \ + --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD} +printf "\n" + +echo "Deploying components and collecting environment variables..." +ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh +echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT +PYTHONPATH=$(pwd)/src +echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT + +for COMPONENT in $TFS_COMPONENTS; do + echo "Processing '$COMPONENT' component..." + + if [ "$TFS_SKIP_BUILD" != "YES" ]; then + echo " Building Docker image..." 
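The three secrets above pull live port numbers out of the cluster with kubectl jsonpath queries. For anyone scripting against the same services from Python, an equivalent lookup with the official kubernetes client might look as follows (a minimal sketch; the get_service_port helper is ours, and it assumes kubeconfig or in-cluster access):

    # Sketch: resolve a named port of a Kubernetes service, mirroring the
    # `kubectl get service ... -o jsonpath=...` queries used above.
    from kubernetes import client, config

    def get_service_port(namespace: str, service: str, port_name: str) -> int:
        config.load_kube_config()  # use config.load_incluster_config() inside a pod
        svc = client.CoreV1Api().read_namespaced_service(service, namespace)
        for port in svc.spec.ports:
            if port.name == port_name:
                return port.port
        raise ValueError(f'port {port_name!r} not found in {namespace}/{service}')

    # e.g. the CockroachDB SQL port stored in the crdb-data secret:
    # crdb_sql_port = get_service_port('crdb', 'cockroachdb-public', 'sql')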
+ BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + elif [ "$COMPONENT" == "pathcomp" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" + docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" + docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" + docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" + docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" + else + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" + fi + + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." + + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" + docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" + docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi + else + for QUICK_COMPONENT in $TFS_QUICK_COMPONENTS; do + if [ "$COMPONENT" == "$QUICK_COMPONENT" ]; then + + echo " Building Docker image..." 
+ BUILD_LOG="$TMP_LOGS_FOLDER/build_${QUICK_COMPONENT}.log" + + docker build -t "$QUICK_COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$QUICK_COMPONENT"/Dockerfile . > "$BUILD_LOG" + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." + + + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$QUICK_COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${QUICK_COMPONENT}.log" + docker tag "$QUICK_COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${QUICK_COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi + done + fi + + echo " Adapting '$COMPONENT' manifest file..." + MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml" + cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" + + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + fi + + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" + + # TODO: harmonize names of the monitoring component + + echo " Deploying '$COMPONENT' component to Kubernetes..." + DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log" + kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG" + COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") + #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + + echo " Collecting env-vars for '$COMPONENT' component..." 
+ + SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME}service --namespace $TFS_K8S_NAMESPACE -o json) + if [ -z "${SERVICE_DATA}" ]; then continue; fi + + # Env vars for service's host address + SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP') + if [ -z "${SERVICE_HOST}" ]; then continue; fi + ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT + + # Env vars for service's 'grpc' port (if any) + SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port') + if [ -n "${SERVICE_PORT_GRPC}" ]; then + ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT + fi + + # Env vars for service's 'http' port (if any) + SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port') + if [ -n "${SERVICE_PORT_HTTP}" ]; then + ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]') + echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT + fi + + printf "\n" +done + +echo "Deploying extra manifests..." +for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do + echo "Processing manifest '$EXTRA_MANIFEST'..." + if [[ "$EXTRA_MANIFEST" == *"servicemonitor"* ]]; then + kubectl apply -f $EXTRA_MANIFEST + else + kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST + fi + printf "\n" +done +printf "\n" + +for COMPONENT in $TFS_COMPONENTS; do + echo "Waiting for '$COMPONENT' component..." + COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") + kubectl wait --namespace $TFS_K8S_NAMESPACE \ + --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service + printf "\n" +done + +if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then + echo "Configuring WebUI DataStores and Dashboards..." + sleep 5 + + # Exposed through the ingress controller "tfs-ingress" + GRAFANA_URL="127.0.0.1:80/grafana" + + # Default Grafana credentials + GRAFANA_USERNAME="admin" + GRAFANA_PASSWORD="admin" + + # Configure Grafana Admin Password + # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password + GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}" + + echo ">> Updating Grafana 'admin' password..." + curl -X PUT -H "Content-Type: application/json" -d '{ + "oldPassword": "'${GRAFANA_PASSWORD}'", + "newPassword": "'${TFS_GRAFANA_PASSWORD}'", + "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'" + }' ${GRAFANA_URL_DEFAULT}/api/user/password + echo + echo + + # Updated Grafana API URL + GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}" + echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT + + echo ">> Installing Scatter Plot plugin..." + curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \ + ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install + echo + + # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/ + QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}" + echo ">> Creating datasources..." 
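Each datasource below is registered with a single authenticated POST against Grafana's HTTP API; a compact Python equivalent of the curl calls that follow (a sketch; jsonData trimmed to the essentials):

    # Sketch: the Grafana datasource POST performed by the curl commands below.
    import requests

    def create_datasource(grafana_url: str, name: str, database: str, host_port: str,
                          user: str, password: str, default: bool = False) -> None:
        payload = {
            'access': 'proxy', 'type': 'postgres', 'name': name,
            'url': host_port, 'database': database, 'user': user,
            'basicAuth': False, 'isDefault': default,
            'jsonData': {'sslmode': 'disable', 'postgresVersion': 1100},
            'secureJsonData': {'password': password},
        }
        requests.post(f'{grafana_url}/api/datasources', json=payload).raise_for_status()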
+ curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{ + "access" : "proxy", + "type" : "postgres", + "name" : "questdb-mon-kpi", + "url" : "'${QDB_HOST_PORT}'", + "database" : "'${QDB_TABLE_MONITORING_KPIS}'", + "user" : "'${QDB_USERNAME}'", + "basicAuth": false, + "isDefault": true, + "jsonData" : { + "sslmode" : "disable", + "postgresVersion" : 1100, + "maxOpenConns" : 0, + "maxIdleConns" : 2, + "connMaxLifetime" : 14400, + "tlsAuth" : false, + "tlsAuthWithCACert" : false, + "timescaledb" : false, + "tlsConfigurationMethod": "file-path", + "tlsSkipVerify" : true + }, + "secureJsonData": {"password": "'${QDB_PASSWORD}'"} + }' ${GRAFANA_URL_UPDATED}/api/datasources + echo + + curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{ + "access" : "proxy", + "type" : "postgres", + "name" : "questdb-slc-grp", + "url" : "'${QDB_HOST_PORT}'", + "database" : "'${QDB_TABLE_SLICE_GROUPS}'", + "user" : "'${QDB_USERNAME}'", + "basicAuth": false, + "isDefault": false, + "jsonData" : { + "sslmode" : "disable", + "postgresVersion" : 1100, + "maxOpenConns" : 0, + "maxIdleConns" : 2, + "connMaxLifetime" : 14400, + "tlsAuth" : false, + "tlsAuthWithCACert" : false, + "timescaledb" : false, + "tlsConfigurationMethod": "file-path", + "tlsSkipVerify" : true + }, + "secureJsonData": {"password": "'${QDB_PASSWORD}'"} + }' ${GRAFANA_URL_UPDATED}/api/datasources + printf "\n\n" + + echo ">> Creating dashboards..." + # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/ + curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \ + ${GRAFANA_URL_UPDATED}/api/dashboards/db + echo + + curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \ + ${GRAFANA_URL_UPDATED}/api/dashboards/db + printf "\n\n" + + echo ">> Staring dashboards..." 
+ DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit" + DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') + curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} + echo + + DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps" + DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') + curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} + echo + + printf "\n\n" +fi diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py index 2bfe50ccc..9f5c35435 100644 --- a/src/common/tools/object_factory/Service.py +++ b/src/common/tools/object_factory/Service.py @@ -64,6 +64,16 @@ def json_service_l2nm_planned( status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) +def json_service_set_interface_planned( + service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME + ): + + return json_service( + service_uuid, ServiceTypeEnum.SERVICETYPE_SET_INTERFACE, context_id=json_context_id(context_uuid), + status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, + config_rules=config_rules) + def json_service_l3nm_planned( service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py index e41b0d0d3..c09d0c962 100644 --- a/src/common/type_checkers/Assertions.py +++ b/src/common/type_checkers/Assertions.py @@ -109,6 +109,7 @@ def validate_service_type_enum(message): 'SERVICETYPE_E2E', 'SERVICETYPE_OPTICAL_CONNECTIVITY', 'SERVICETYPE_QKD', + 'SERVICETYPE_SET_INTERFACE', ] def validate_service_state_enum(message): @@ -146,6 +147,7 @@ def validate_uuid(message, allow_empty=False): CONFIG_RULE_TYPES = { 'custom', 'acl', + 'set_interface', } def validate_config_rule(message): assert isinstance(message, dict) diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py index c9db5488c..97dda2a4d 100644 --- a/src/context/service/database/ConfigRule.py +++ b/src/context/service/database/ConfigRule.py @@ -71,6 +71,9 @@ def compose_config_rules_data( _, _, endpoint_uuid = endpoint_get_uuid(config_rule.acl.endpoint_id, allow_random=False) rule_set_name = config_rule.acl.rule_set.name configrule_name = '{:s}:{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid, rule_set_name) + elif kind == ConfigRuleKindEnum.SET_INTERFACE: + _, _, endpoint_uuid = endpoint_get_uuid(config_rule.set_interface.endpoint_id, allow_random=False) + configrule_name = '{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid) else: MSG = 'Name for ConfigRule({:s}) cannot be inferred '+\ '(device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})' diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index 9076fc025..e9e9996c8 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -88,6 +88,8 @@ def service_set(db_engine : Engine, messagebroker : MessageBroker, request : Ser service_type = grpc_to_enum__service_type(request.service_type) if service_type is None and request.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: service_type = 
"OPTICAL_CONNECTIVITY" + if service_type is None and request.service_type == ServiceTypeEnum.SERVICETYPE_SET_INTERFACE: + service_type = "SET_INTERFACE" service_status = grpc_to_enum__service_status(request.service_status.service_status) diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index 5462df672..3cc2efd21 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -23,6 +23,7 @@ from ._Base import _Base class ConfigRuleKindEnum(enum.Enum): CUSTOM = 'custom' ACL = 'acl' + SET_INTERFACE = 'set_interface' class DeviceConfigRuleModel(_Base): __tablename__ = 'device_configrule' diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py index 93271c3b9..d37ce5f91 100644 --- a/src/context/service/database/models/enums/ServiceType.py +++ b/src/context/service/database/models/enums/ServiceType.py @@ -33,6 +33,7 @@ class ORM_ServiceTypeEnum(enum.Enum): QKD = ServiceTypeEnum.SERVICETYPE_QKD INT = ServiceTypeEnum.SERVICETYPE_INT ACL = ServiceTypeEnum.SERVICETYPE_ACL + SET_INTERFACE = ServiceTypeEnum.SERVICETYPE_SET_INTERFACE grpc_to_enum__service_type = functools.partial( grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index a62a0d702..0c2fb0a0f 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -155,7 +155,7 @@ def populate_endpoints( _sub_device.name = resource_value['name'] _sub_device.device_type = resource_value['type'] _sub_device.device_operational_status = resource_value['status'] - + # Sub-devices might not have a driver assigned. 
            if 'drivers' in resource_value:
                drivers = resource_value['drivers']
@@ -299,9 +299,9 @@ def populate_initial_config_rules(device_uuid : str, device_config : DeviceConfi
 def compute_rules_to_add_delete(
     device : Device, request : Device
 ) -> Tuple[List[Tuple[str, Any]], List[Tuple[str, Any]]]:
-    # convert config rules from context into a dictionary
+    # convert config rules from context into a dictionary
     context_config_rules = {}
-    for config_rule in device.device_config.config_rules:
+    for config_rule in device.device_config.config_rules:
         config_rule_kind = config_rule.WhichOneof('config_rule')
         if config_rule_kind == 'custom':    # process "custom" rules
             context_config_rules[config_rule.custom.resource_key] = config_rule.custom.resource_value # get the resource value of the rule resource
@@ -310,25 +310,43 @@ def compute_rules_to_add_delete(
             endpoint_uuid = config_rule.acl.endpoint_id.endpoint_uuid.uuid          # get the endpoint name
             acl_ruleset_name = config_rule.acl.rule_set.name                        # get the acl name
             ACL_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/acl_ruleset[{:s}]'
-            key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, acl_ruleset_name)
+            key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, acl_ruleset_name)
             context_config_rules[key_or_path] = grpc_message_to_json(config_rule.acl)   # get the resource value of the acl
-
+
+        elif config_rule_kind == 'set_interface':   # process "set_interface" rules
+            device_uuid = config_rule.set_interface.endpoint_id.device_id.device_uuid.uuid  # get the device name
+            endpoint_uuid = config_rule.set_interface.endpoint_id.endpoint_uuid.uuid        # get the endpoint name
+            SET_INTERFACE_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/set_interface_ruleset'
+            key_or_path = SET_INTERFACE_KEY_TEMPLATE.format(device_uuid, endpoint_uuid)
+            context_config_rules[key_or_path] = grpc_message_to_json(config_rule.set_interface)
+            LOGGER.debug(f'context_config_rules [{key_or_path}] = {context_config_rules[key_or_path]}')
+
     request_config_rules = []
     for config_rule in request.device_config.config_rules:
         config_rule_kind = config_rule.WhichOneof('config_rule')
-        if config_rule_kind == 'custom':    # resource management of "custom" rule
+        if config_rule_kind == 'custom':    # resource management of "custom" rule
            request_config_rules.append((
                config_rule.action, config_rule.custom.resource_key, config_rule.custom.resource_value
            ))
-        elif config_rule_kind == 'acl':     # resource management of "acl" rule
+        elif config_rule_kind == 'acl':     # resource management of "acl" rule
            device_uuid = config_rule.acl.endpoint_id.device_id.device_uuid.uuid
            endpoint_uuid = config_rule.acl.endpoint_id.endpoint_uuid.uuid
            acl_ruleset_name = config_rule.acl.rule_set.name
            ACL_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/acl_ruleset[{:s}]'
-           key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, acl_ruleset_name)
+           key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, acl_ruleset_name)
            request_config_rules.append((
                config_rule.action, key_or_path, grpc_message_to_json(config_rule.acl)
            ))
+        elif config_rule_kind == 'set_interface':   # resource management of "set_interface" rule
+           device_uuid = config_rule.set_interface.endpoint_id.device_id.device_uuid.uuid
+           endpoint_uuid = config_rule.set_interface.endpoint_id.endpoint_uuid.uuid
+           SET_INTERFACE_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/set_interface_ruleset'
+           key_or_path = SET_INTERFACE_KEY_TEMPLATE.format(device_uuid, endpoint_uuid)
+           request_config_rules.append((
+               config_rule.action, key_or_path, grpc_message_to_json(config_rule.set_interface)
+           ))
+    LOGGER.debug(f'request_config_rules = {request_config_rules}')
 
     resources_to_set    : List[Tuple[str, Any]] = [] # key, value
     resources_to_delete : List[Tuple[str, Any]] = [] # key, value
@@ -407,7 +425,7 @@ def subscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loo
     return errors
 
 def unsubscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]:
-    kpi_uuid = request.kpi_id.kpi_id.uuid
+    kpi_uuid = request.kpi_id.kpi_id.uuid
 
     kpi_details = monitoring_loops.get_kpi_by_uuid(kpi_uuid)
     if kpi_details is None:
@@ -502,7 +520,7 @@ def extract_resources(config : dict, device : Device) -> list[list[dict], dict]:
     else :
         resources.append(is_key_existed('channel_namespace', config))
         resources.append(is_key_existed('add_transceiver', config))
-
+
     conditions['is_opticalband'] = is_opticalband
     if 'flow' in config:
         #for tuple_value in config['flow'][device.name]:
         dest_vals = []
         handled_flow = []
         for tuple_value in config['flow']:
-            source_port = None
+            source_port = None
             destination_port = None
             source_port_uuid, destination_port_uuid = tuple_value
             if source_port_uuid != '0':

diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index 4bf7be22c..011700337 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -30,7 +30,7 @@ from device.service.driver_api.Exceptions import UnsupportedResourceKeyException
 from device.service.driver_api._Driver import _Driver
 from device.service.driver_api.AnyTreeTools import TreeNode, get_subnode, set_subnode_value #dump_subtree
 #from .Tools import xml_pretty_print, xml_to_dict, xml_to_file
-from .templates import ALL_RESOURCE_KEYS, EMPTY_CONFIG, compose_config, get_filter, parse, cli_compose_config
+from .templates import ALL_RESOURCE_KEYS, EMPTY_CONFIG, compose_config, get_filter, parse, cli_compose_config, ufi_interface, cisco_interface
 from .RetryDecorator import retry
 
 DEBUG_MODE = False
@@ -96,14 +96,14 @@ class NetconfSessionHandler:
     def use_candidate(self): return self.__candidate_supported and not self.__force_running
 
     @property
-    def commit_per_rule(self): return self.__commit_per_rule
+    def commit_per_rule(self): return self.__commit_per_rule
 
     @property
     def vendor(self): return self.__vendor
-
+
     @property
     def version(self): return self.__version
-
+
     @property
     def message_renderer(self): return self.__message_renderer
 
@@ -212,10 +212,17 @@ def edit_config(
 ):
     str_method = 'DeleteConfig' if delete else 'SetConfig'
     results = []
-    if "L2VSI" in resources[0][1] and netconf_handler.vendor == "CISCO":
-        #Configure by CLI
-        logger.warning("CLI Configuration")
-        cli_compose_config(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
+    if netconf_handler.vendor == "CISCO":
+        if "L2VSI" in resources[0][1]:
+            cli_compose_config(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
+            for i,resource in enumerate(resources):
+                results.append(True)
+        else:
+            cisco_interface(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
+            for i,resource in enumerate(resources):
+                results.append(True)
+    elif netconf_handler.vendor == "UFISPACE":
+        ufi_interface(resources, delete=delete)
         for i,resource in enumerate(resources):
             results.append(True)
     else:
@@ -229,7 +236,7 @@ def edit_config(
                 chk_string(str_resource_name + '.key', resource_key, allow_empty=False)
                 str_config_messages = compose_config(    # get template for configuration
                     resource_key, resource_value, delete=delete, vendor=netconf_handler.vendor, message_renderer=netconf_handler.message_renderer)
-                for str_config_message in str_config_messages:  # configuration of the received templates
+                for str_config_message in str_config_messages:  # configuration of the received templates
                     if str_config_message is None: raise UnsupportedResourceKeyException(resource_key)
                     logger.debug('[{:s}] str_config_message[{:d}] = {:s}'.format(
                         str_method, len(str_config_message), str(str_config_message)))
@@ -240,7 +247,7 @@ def edit_config(
                 netconf_handler.commit()                        # configuration commit
             if 'table_connections' in resource_key:
                 time.sleep(5)   # CPU usage might exceed critical level after route redistribution, BGP daemon needs time to reload
-
+
             #results[i] = True
             results.append(True)
         except Exception as e: # pylint: disable=broad-except

diff --git a/src/device/service/drivers/openconfig/templates/SET_INTERFACE/SET_INTERFACE_multivendor.py b/src/device/service/drivers/openconfig/templates/SET_INTERFACE/SET_INTERFACE_multivendor.py
new file mode 100755
index 000000000..b1e8438b1
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/SET_INTERFACE/SET_INTERFACE_multivendor.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yattag import Doc, indent
+
+def set_interface_mgmt(data, vendor, delete):
+    doc, tag, text = Doc().tagtext()
+
+    ID   = data['endpoint_id']['endpoint_uuid']['uuid']
+    DATA = data["rule_set"]
+
+    with tag('interfaces', xmlns="http://openconfig.net/yang/interfaces"):
+        if delete:                              # render a NETCONF delete operation for the interface
+            with tag('interface', 'xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete"'):
+                with tag('name'): text(ID)
+        else:
+            with tag('interface'):
+                with tag('name'): text(ID)
+                with tag('config'):
+                    with tag('name'): text(ID)
+                    with tag('type', 'xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type"'): text('ianaift:l3ipvlan')
+                    with tag('enabled'): text('true')
+                with tag('subinterfaces'):
+                    with tag('subinterface'):
+                        if vendor is None or vendor == 'ADVA':
+                            with tag('index'): text('0')
+                        with tag('config'):
+                            with tag('index'): text('0')
+                            if vendor == 'ADVA' and 'vlan' not in DATA:
+                                with tag('untagged-allowed', 'xmlns="http://www.advaoptical.com/cim/adva-dnos-oc-interfaces"'): text('true')
+                        if 'vlan' in DATA:      # tag the subinterface only when a VLAN is requested
+                            with tag('vlan', xmlns="http://openconfig.net/yang/vlan"):
+                                with tag('match'):
+                                    with tag('single-tagged'):
+                                        with tag('config'):
+                                            with tag('vlan-id'): text(DATA['vlan'])
+                        with tag('ipv4', xmlns="http://openconfig.net/yang/interfaces/ip"):
+                            with tag('addresses'):
+                                with tag('address'):
+                                    with tag('ip'): text(DATA['ip'])
+                                    with tag('config'):
+                                        with tag('ip'): text(DATA['ip'])
+                                        with tag('prefix-length'): text(DATA['mask'])
+    result = indent(
+        doc.getvalue(),
+        indentation = ' '*2,
+        newline = '\r\n'
+    )
+    return result

diff --git a/src/device/service/drivers/openconfig/templates/Tools.py b/src/device/service/drivers/openconfig/templates/Tools.py
index 62a0ab962..1ea649595 100644
--- a/src/device/service/drivers/openconfig/templates/Tools.py
+++ b/src/device/service/drivers/openconfig/templates/Tools.py
@@ -15,9 +15,10 @@
 import json
 import lxml.etree as ET
 from typing import Collection, Dict, Any
-from .ACL.ACL_multivendor import acl_mgmt
+from .ACL.ACL_multivendor import acl_mgmt
+from .SET_INTERFACE.SET_INTERFACE_multivendor import set_interface_mgmt
 from .VPN.Network_instance_multivendor import create_NI, associate_virtual_circuit, associate_RP_to_NI, add_protocol_NI, create_table_conns, associate_If_to_NI
-from .VPN.Interfaces_multivendor import create_If_SubIf
+from .VPN.Interfaces_multivendor import create_If_SubIf
 from .VPN.Routing_policy import create_rp_def, create_rp_statement
 
 def add_value_from_tag(target : Dict, field_name: str, field_value : ET.Element, cast=None) -> None:
@@ -32,20 +33,20 @@ def add_value_from_collection(target : Dict, field_name: str, field_value : Coll
 
 """
 # Method Name: generate_templates
-
+
 # Parameters:
   - resource_key:   [str] Variable to identify the rule to be executed.
   - resource_value: [str] Variable with the configuration parameters of the rule to be executed.
   - delete:         [bool] Variable to identify whether to create or delete the rule.
   - vendor:         [str] Variable to identify the vendor of the equipment to be configured.
-
+
 # Functionality:
-  This method generates the template to configure the equipment using pyangbind.
+  This method generates the template to configure the equipment using pyangbind.
   To generate the template the following steps are performed:
     1) Get the first parameter of the variable "resource_key" to identify the main path of the rule.
     2) Search for the specific configuration path
-    3) Call the method with the configuration parameters (resource_data variable).
-
+    3) Call the method with the configuration parameters (resource_data variable).
+
+# Return:
+  [dict] Set of templates generated according to the configuration rule
 """
@@ -70,7 +71,7 @@ def generate_templates(resource_key: str, resource_value: str, delete: bool,vend
         else:
             result_templates.append(create_NI(data,vendor,delete))
 
-    if "interface" in list_resource_key[1]:     # interface rules management
+    elif "interface" in list_resource_key[1]:   # interface rules management
         data: Dict[str, Any] = json.loads(resource_value)
         #data['DEL'] = delete
         if "subinterface" in resource_key:
@@ -83,8 +84,9 @@ def generate_templates(resource_key: str, resource_value: str, delete: bool,vend
             result_templates.append(create_rp_def(data, delete))
         else:
             result_templates.append(create_rp_statement(data, delete))
-    else:
-        if "acl_ruleset" in resource_key:       # acl rules management
-            result_templates.extend(acl_mgmt(resource_value,vendor, delete))
+    elif "acl_ruleset" in resource_key:         # acl rules management
+        result_templates.extend(acl_mgmt(resource_value,vendor, delete))
+    elif "set_interface" in resource_key:       # set_interface rules management
+        result_templates.append(set_interface_mgmt(json.loads(resource_value), vendor, delete))
 
     return result_templates
\ No newline at end of file

diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py
index 47559efda..21a00e001 100644
--- a/src/device/service/drivers/openconfig/templates/__init__.py
+++ b/src/device/service/drivers/openconfig/templates/__init__.py
@@ -87,18 +87,18 @@ def parse(resource_key : str, xml_data : ET.Element):
 
 """
 # Method Name: compose_config
-
+
 # Parameters:
   - resource_key:     [str] Variable to identify the rule to be executed.
   - resource_value:   [str] Variable with the configuration parameters of the rule to be executed.
   - delete:           [bool] Variable to identify whether to create or delete the rule.
   - vendor:           [str] Variable to identify the vendor of the equipment to be configured.
   - message_renderer: [str] Variable to identify the template generation method. Can be "jinja" or "pyangbind".
-
+
 # Functionality:
   This method obtains the equipment configuration template according to the value of the variable "message_renderer".
-  Depending on the value of this variable, it gets the template with "jinja" or "pyangbind".
-
+  Depending on the value of this variable, it gets the template with "jinja" or "pyangbind".
+
 # Return:
   [dict] Set of templates obtained according to the configuration method
"""
@@ -149,30 +149,30 @@ def compose_config(    # template generation
             template.render(**data, operation=operation, vendor=vendor).strip())
             for template in templates
         ]
-
+
     else:
-        raise ValueError('Invalid message_renderer value: {}'.format(message_renderer))
+        raise ValueError('Invalid message_renderer value: {}'.format(message_renderer))
 
 """
 # Method Name: cli_compose_config
-
+
 # Parameters:
   - resource_key:     [str] Variable to identify the rule to be executed.
   - resource_value:   [str] Variable with the configuration parameters of the rule to be executed.
   - delete:           [bool] Variable to identify whether to create or delete the rule.
   - vendor:           [str] Variable to identify the vendor of the equipment to be configured.
   - message_renderer: [str] Variable to identify the template generation method. Can be "jinja" or "pyangbind".
-
+
 # Functionality:
   This method obtains the equipment configuration template according to the value of the variable "message_renderer".
-  Depending on the value of this variable, it gets the template with "jinja" or "pyangbind".
-
+  Depending on the value of this variable, it gets the template with "jinja" or "pyangbind".
+
 # Return:
   [dict] Set of templates obtained according to the configuration method
"""
 def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str): # Method used for configuring L2VPN directly via CLI in CISCO devices
-
+
     key_value_data = {}
 
     for path, json_str in resources:
@@ -194,13 +194,13 @@ def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str
     if 'index' in data: subif_index = data['index']
     if 'description' in data: description = data['description']
     else: description = " "
-
+
     # initialize the SSH client
     ssh_client = paramiko.SSHClient()
     ssh_client.load_system_host_keys()
     # add to known hosts
     ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
+
     try:
         ssh_client.connect(hostname=host, username=user, password=passw, look_for_keys=False)
         #print("Connection successful")
@@ -209,7 +209,7 @@ def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str
         #print("[!] Cannot connect to the SSH Server")
         LOGGER.warning("[!] Cannot connect to the SSH Server")
         exit()
-
+
     try:
         # Open an SSH shell
         channel = ssh_client.invoke_shell()
@@ -227,7 +227,7 @@ def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str
         time.sleep(0.1)
         channel.send('commit\n')
         time.sleep(0.1)
-
+
         channel.send('l2vpn\n')
         time.sleep(0.1)
         channel.send('load-balancing flow src-dst-ip\n')
@@ -259,7 +259,7 @@ def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str
         channel.send(f"description {description}\n")
         time.sleep(0.1)
         channel.send('commit\n')
-        time.sleep(0.1)
+        time.sleep(0.1)
         # Capture the command output
         output = channel.recv(65535).decode('utf-8')
         #print(output)
@@ -272,4 +272,105 @@ def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str
 
     # Close the SSH client
     ssh_client.close()
-
\ No newline at end of file
+
+def ufi_interface(resources, delete: bool): # Method used for configuring an interface directly via CLI in UFISPACE devices
+
+    key_value_data = {}
+
+    for path, json_str in resources:
+        key_value_data[path] = json_str
+
+    # initialize the SSH client
+    ssh_client = paramiko.SSHClient()
+    ssh_client.load_system_host_keys()
+    # add to known hosts
+    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+    try:
+        # NOTE: connection parameters are hardcoded; they are not derived from the resources argument yet
+        ssh_client.connect(hostname='10.95.90.75', username='dnroot', password='dnroot', look_for_keys=False)
+        LOGGER.warning("Connection successful")
+    except Exception:
+        LOGGER.warning("[!] Cannot connect to the SSH Server")
+        exit()
+    # NOTE: interface parameters are hardcoded placeholders; they are not derived from the resources argument yet
+    interface = 'ge100-0/0/3/1'
+    ip = '1.1.1.1'
+    mask = '24'
+    vlan = '1212'
+    try:
+        # Open an SSH shell
+        channel = ssh_client.invoke_shell()
+        time.sleep(5)
+        channel.send('config\n')
+        time.sleep(1)
+        channel.send(f'interfaces {interface} \n')
+        time.sleep(1)
+        channel.send('admin-state enabled \n')
+        time.sleep(1)
+        channel.send(f'ipv4-address {ip}/{mask} \n')
+        time.sleep(1)
+        channel.send(f'vlan-id {vlan} \n')
+        time.sleep(1)
+        channel.send('commit\n')
+        time.sleep(1)
+
+        output = channel.recv(65535).decode('utf-8')
+        LOGGER.warning(output)
+        # Close the SSH shell
+        channel.close()
+
+    except Exception as e:
+        LOGGER.exception(f"Error with the CLI configuration: {e}")
+
+    # Close the SSH client
+    ssh_client.close()
+
+def cisco_interface(resources, delete: bool, host: str, user: str, passw: str): # Method used for configuring an interface directly via CLI in CISCO devices
+
+    key_value_data = {}
+
+    for path, json_str in resources:
+        key_value_data[path] = json_str
+
+    # initialize the SSH client
+    ssh_client = paramiko.SSHClient()
+    ssh_client.load_system_host_keys()
+    # add to known hosts
+    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+    try:
+        # NOTE: credentials are hardcoded; the host/user/passw parameters are not used yet
+        ssh_client.connect(hostname='10.90.95.150', username='cisco', password='cisco123', look_for_keys=False)
+        LOGGER.warning("Connection successful")
+    except Exception:
+        LOGGER.warning("[!] Cannot connect to the SSH Server")
+        exit()
+    # NOTE: interface parameters are hardcoded placeholders; they are not derived from the resources argument yet
+    interface = 'FourHundredGigE0/0/0/10.1212'
+    ip = '1.1.1.1'
+    mask = '24'
+    vlan = '1212'
+    try:
+        # Open an SSH shell
+        channel = ssh_client.invoke_shell()
+        time.sleep(1)
+        channel.send('config\n')
+        time.sleep(0.1)
+        channel.send(f'interface {interface} \n')
+        time.sleep(0.1)
+        channel.send('no shutdown\n')
+        time.sleep(0.1)
+        channel.send(f'ipv4 address {ip}/{mask} \n')
+        time.sleep(0.1)
+        channel.send(f'encapsulation dot1q {vlan} \n')
+        time.sleep(0.1)
+        channel.send('commit\n')
+        time.sleep(0.1)
+
+        output = channel.recv(65535).decode('utf-8')
+        LOGGER.warning(output)
+        # Close the SSH shell
+        channel.close()
+
+    except Exception as e:
+        LOGGER.exception(f"Error with the CLI configuration: {e}")
+
+    # Close the SSH client
+    ssh_client.close()

diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index a5bfe1352..ae3fe04dc 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -24,7 +24,7 @@ from pathcomp.frontend.Config import BACKEND_URL
 from .tools.EroPathToHops import eropath_to_hops
 from .tools.ComposeConfigRules import (
     compose_device_config_rules, compose_l2nm_config_rules, compose_l3nm_config_rules, compose_tapi_config_rules,
-    generate_neighbor_endpoint_config_rules
+    generate_neighbor_endpoint_config_rules, compose_setinterface_config_rules
 )
 from .tools.ComposeRequest import compose_device, compose_link, compose_service
 from .tools.ComputeSubServices import (
@@ -189,6 +189,8 @@ class _Algorithm:
             elif service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE and rules_nb == 0:
                 compose_tapi_config_rules(config_rules, service.service_config.config_rules)
                 self.logger.info("Installing default rules for TAPI service")
+            elif service_type == ServiceTypeEnum.SERVICETYPE_SET_INTERFACE:
+                compose_setinterface_config_rules(config_rules, service.service_config.config_rules)
             else:
                 MSG = 'Unhandled generic Config Rules for service {:s} {:s}'
                 self.logger.warning(MSG.format(str(service_uuid),
str(ServiceTypeEnum.Name(service_type)))) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py index 073c3474f..addc4e96e 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py @@ -54,6 +54,10 @@ TAPI_SETTINGS_FIELD_DEFAULTS = { 'direction' : 'UNIDIRECTIONAL', } +SET_INTERFACE_SETTINGS_FIELD_DEFAULTS = { + 'mtu' : 1500, +} + def find_custom_config_rule(config_rules : List, resource_name : str) -> Optional[Dict]: resource_value : Optional[Dict] = None for config_rule in config_rules: @@ -108,6 +112,11 @@ def compose_tapi_config_rules(main_service_config_rules : List, subservice_confi for rule_name, defaults in CONFIG_RULES: compose_config_rules(main_service_config_rules, subservice_config_rules, rule_name, defaults) +def compose_setinterface_config_rules(main_service_config_rules : List, subservice_config_rules : List) -> None: + CONFIG_RULES: List[Tuple[str, dict]] = [(SETTINGS_RULE_NAME, SET_INTERFACE_SETTINGS_FIELD_DEFAULTS)] + for rule_name, defaults in CONFIG_RULES: + compose_config_rules(main_service_config_rules, subservice_config_rules, rule_name, defaults) + def compose_device_config_rules( config_rules : List, subservice_config_rules : List, path_hops : List, device_name_mapping : Dict[str, str], endpoint_name_mapping : Dict[Tuple[str, str], str] @@ -153,10 +162,34 @@ def compose_device_config_rules( device_endpoint_keys = set(itertools.product(device_keys, endpoint_keys)) if len(device_endpoint_keys.intersection(endpoints_traversed)) == 0: continue - + LOGGER.debug('[compose_device_config_rules] adding acl config rule') subservice_config_rules.append(config_rule) + elif config_rule.WhichOneof('config_rule') == 'set_interface': + LOGGER.debug('[compose_device_config_rules] is set_interface') + endpoint_id = config_rule.set_interface.endpoint_id + device_uuid_or_name = endpoint_id.device_id.device_uuid.uuid + LOGGER.debug('[compose_device_config_rules] device_uuid_or_name={:s}'.format(str(device_uuid_or_name))) + device_name_or_uuid = device_name_mapping.get(device_uuid_or_name, device_uuid_or_name) + LOGGER.debug('[compose_device_config_rules] device_name_or_uuid={:s}'.format(str(device_name_or_uuid))) + device_keys = {device_uuid_or_name, device_name_or_uuid} + if len(device_keys.intersection(devices_traversed)) == 0: continue + + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + LOGGER.debug('[compose_device_config_rules] endpoint_uuid={:s}'.format(str(endpoint_uuid))) + endpoint_uuid_or_name = (endpoint_uuid[::-1].split('.', maxsplit=1)[-1])[::-1] + LOGGER.debug('[compose_device_config_rules] endpoint_uuid_or_name={:s}'.format(str(endpoint_uuid_or_name))) + endpoint_name_or_uuid_1 = endpoint_name_mapping[(device_uuid_or_name, endpoint_uuid_or_name)] + endpoint_name_or_uuid_2 = endpoint_name_mapping[(device_name_or_uuid, endpoint_uuid_or_name)] + endpoint_keys = {endpoint_uuid_or_name, endpoint_name_or_uuid_1, endpoint_name_or_uuid_2} + + device_endpoint_keys = set(itertools.product(device_keys, endpoint_keys)) + if len(device_endpoint_keys.intersection(endpoints_traversed)) == 0: continue + + LOGGER.debug('[compose_device_config_rules] adding set_interface config rule') + subservice_config_rules.append(config_rule) + elif config_rule.WhichOneof('config_rule') == 'custom': LOGGER.debug('[compose_device_config_rules] is custom') @@ -292,49 +325,55 @@ def 
generate_neighbor_endpoint_config_rules(
     for config_rule in config_rules:
         # Only applicable, by now, to Custom Config Rules for endpoint settings
-        if 'custom' not in config_rule: continue
-        match = RE_ENDPOINT_SETTINGS.match(config_rule['custom']['resource_key'])
-        if match is None:
-            match = RE_ENDPOINT_VLAN_SETTINGS.match(config_rule['custom']['resource_key'])
-            if match is None: continue
-
-        resource_key_values = match.groups()
-        if resource_key_values[0:2] in device_endpoint_keys_a:
-            resource_key_values = list(resource_key_values)
-            resource_key_values[0] = link_endpoint_b['device']
-            resource_key_values[1] = link_endpoint_b['ingress_ep']
-        elif resource_key_values[0:2] in device_endpoint_keys_b:
-            resource_key_values = list(resource_key_values)
-            resource_key_values[0] = link_endpoint_a['device']
-            resource_key_values[1] = link_endpoint_a['egress_ep']
+        if 'custom' not in config_rule and 'set_interface' not in config_rule: continue
+        if 'custom' in config_rule:
+            match = RE_ENDPOINT_SETTINGS.match(config_rule['custom']['resource_key'])
+            if match is None:
+                match = RE_ENDPOINT_VLAN_SETTINGS.match(config_rule['custom']['resource_key'])
+                if match is None: continue
+            resource_key_values = match.groups()
+            if resource_key_values[0:2] in device_endpoint_keys_a:
+                resource_key_values = list(resource_key_values)
+                resource_key_values[0] = link_endpoint_b['device']
+                resource_key_values[1] = link_endpoint_b['ingress_ep']
+            elif resource_key_values[0:2] in device_endpoint_keys_b:
+                resource_key_values = list(resource_key_values)
+                resource_key_values[0] = link_endpoint_a['device']
+                resource_key_values[1] = link_endpoint_a['egress_ep']
+            else:
+                continue
+
+            device_keys = compute_device_keys(resource_key_values[0], device_name_mapping)
+            device_names = {device_key for device_key in device_keys if RE_UUID.match(device_key) is None}
+            if len(device_names) != 1:
+                MSG = 'Unable to identify name for Device({:s}): device_keys({:s})'
+                raise Exception(MSG.format(str(resource_key_values[0]), str(device_keys)))
+            resource_key_values[0] = device_names.pop()
+
+            endpoint_keys = compute_endpoint_keys(device_keys, resource_key_values[1], endpoint_name_mapping)
+            endpoint_names = {endpoint_key for endpoint_key in endpoint_keys if RE_UUID.match(endpoint_key) is None}
+            if len(endpoint_names) != 1:
+                MSG = 'Unable to identify name for Endpoint({:s}): endpoint_keys({:s})'
+                raise Exception(MSG.format(str(resource_key_values[1]), str(endpoint_keys)))
+            resource_key_values[1] = endpoint_names.pop()
+
+            resource_value : Dict = json.loads(config_rule['custom']['resource_value'])
+            if 'neighbor_address' not in resource_value: continue
+            resource_value['ip_address'] = resource_value.pop('neighbor_address')
+
+            # remove neighbor_address also from original rule as it is already consumed
+
+            resource_key_template = TMPL_ENDPOINT_VLAN_SETTINGS if len(match.groups()) == 3 else TMPL_ENDPOINT_SETTINGS
+            generated_config_rule = copy.deepcopy(config_rule)
+            generated_config_rule['custom']['resource_key'] = resource_key_template.format(*resource_key_values)
+            generated_config_rule['custom']['resource_value'] = json.dumps(resource_value)
+            generated_config_rules.append(generated_config_rule)
         else:
-            continue
-
-        device_keys = compute_device_keys(resource_key_values[0], device_name_mapping)
-        device_names = {device_key for device_key in device_keys if RE_UUID.match(device_key) is None}
-        if len(device_names) != 1:
-            MSG = 'Unable to identify name for Device({:s}): device_keys({:s})'
-            raise
-        resource_key_values[0] = device_names.pop()
-
-        endpoint_keys = compute_endpoint_keys(device_keys, resource_key_values[1], endpoint_name_mapping)
-        endpoint_names = {endpoint_key for endpoint_key in endpoint_keys if RE_UUID.match(endpoint_key) is None}
-        if len(endpoint_names) != 1:
-            MSG = 'Unable to identify name for Endpoint({:s}): endpoint_keys({:s})'
-            raise Exception(MSG.format(str(resource_key_values[1]), str(endpoint_keys)))
-        resource_key_values[1] = endpoint_names.pop()
-
-        resource_value : Dict = json.loads(config_rule['custom']['resource_value'])
-        if 'neighbor_address' not in resource_value: continue
-        resource_value['ip_address'] = resource_value.pop('neighbor_address')
-
-        # remove neighbor_address also from original rule as it is already consumed
-
-        resource_key_template = TMPL_ENDPOINT_VLAN_SETTINGS if len(match.groups()) == 3 else TMPL_ENDPOINT_SETTINGS
-        generated_config_rule = copy.deepcopy(config_rule)
-        generated_config_rule['custom']['resource_key'] = resource_key_template.format(*resource_key_values)
-        generated_config_rule['custom']['resource_value'] = json.dumps(resource_value)
-        generated_config_rules.append(generated_config_rule)
+            LOGGER.debug('[generate_neighbor_endpoint_config_rules] SET_INTERFACE: {:s}'.format(str(config_rule)))
+            # set_interface rules carry their own endpoint_id, so the rule is
+            # mirrored unchanged to the neighbor endpoint
+            generated_config_rules.append(copy.deepcopy(config_rule))
 
     LOGGER.debug('[generate_neighbor_endpoint_config_rules] generated_config_rules={:s}'.format(str(generated_config_rules)))
     LOGGER.debug('[generate_neighbor_endpoint_config_rules] end')
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
index 2f55db0c6..c98bc8f70 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
@@ -46,9 +46,10 @@ SERVICE_TYPE_L2NM = {ServiceTypeEnum.SERVICETYPE_L2NM}
 SERVICE_TYPE_L3NM = {ServiceTypeEnum.SERVICETYPE_L3NM}
 SERVICE_TYPE_LXNM = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM}
 SERVICE_TYPE_TAPI = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE}
+SERVICE_TYPE_SET_INTERFACE = {ServiceTypeEnum.SERVICETYPE_SET_INTERFACE}
 
 def get_service_type(device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum) -> ServiceTypeEnum:
-    if device_type in PACKET_DEVICE_TYPES and prv_service_type in SERVICE_TYPE_LXNM: return prv_service_type
+    if device_type in PACKET_DEVICE_TYPES and prv_service_type in (SERVICE_TYPE_LXNM | SERVICE_TYPE_SET_INTERFACE): return prv_service_type
     if device_type in L2_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_L2NM
     if device_type in OPTICAL_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
     if device_type in NETWORK_DEVICE_TYPES: return prv_service_type
diff --git a/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java b/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java
index f9eefd5d2..0f31a99a8 100644
--- a/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java
+++ b/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java
@@ -1177,6 +1177,8 @@ public class Serializer {
                 return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_L3NM;
             case TAPI_CONNECTIVITY_SERVICE:
                 return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE;
+            case SET_INTERFACE:
+                return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_SET_INTERFACE;
             case UNKNOWN:
                 return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_UNKNOWN;
             default:
@@ -1192,6 +1194,8 @@ public class Serializer {
                 return ServiceTypeEnum.L3NM;
             case SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
                 return ServiceTypeEnum.TAPI_CONNECTIVITY_SERVICE;
+            case SERVICETYPE_SET_INTERFACE:
+                return ServiceTypeEnum.SET_INTERFACE;
             case SERVICETYPE_UNKNOWN:
             case UNRECOGNIZED:
             default:
diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py
index a56bcf0f9..9183479c4 100644
--- a/src/service/service/service_handler_api/FilterFields.py
+++ b/src/service/service/service_handler_api/FilterFields.py
@@ -31,6 +31,7 @@ SERVICE_TYPE_VALUES = {
     ServiceTypeEnum.SERVICETYPE_QKD,
     ServiceTypeEnum.SERVICETYPE_INT,
     ServiceTypeEnum.SERVICETYPE_ACL,
+    ServiceTypeEnum.SERVICETYPE_SET_INTERFACE,
 }
 
 DEVICE_DRIVER_VALUES = {
diff --git a/src/service/service/service_handler_api/SettingsHandler.py b/src/service/service/service_handler_api/SettingsHandler.py
index b9b8b2950..f116b933c 100644
--- a/src/service/service/service_handler_api/SettingsHandler.py
+++ b/src/service/service/service_handler_api/SettingsHandler.py
@@ -47,6 +47,13 @@ class SettingsHandler:
             ACL_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/index[{:d}]/acl_ruleset[{:s}]'
             key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_name,endpoint_index, acl_ruleset_name)
             value = grpc_message_to_json(config_rule.acl)
+        elif kind == 'set_interface':
+            device_uuid = config_rule.set_interface.endpoint_id.device_id.device_uuid.uuid
+            endpoint_uuid = config_rule.set_interface.endpoint_id.endpoint_uuid.uuid
+            endpoint_name, endpoint_index = extract_endpoint_index(endpoint_uuid)
+            SET_INTERFACE_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/subindex[{:d}]/set_interface'
+            key_or_path = SET_INTERFACE_KEY_TEMPLATE.format(device_uuid, endpoint_name, endpoint_index)
+            value = config_rule.set_interface
         else:
             MSG = 'Unsupported Kind({:s}) in ConfigRule({:s})'
             LOGGER.warning(MSG.format(str(kind), grpc_message_to_json_string(config_rule)))
@@ -105,6 +112,26 @@ class SettingsHandler:
             if not 'index[{:d}]'.format(acl_index) in res_key: continue
             acl_rules.append((res_key, res_value))
         return acl_rules
+
+    def get_endpoint_set_interface(self, device : Device, endpoint : EndPoint) -> List[Tuple]:
+        device_keys = device.device_id.device_uuid.uuid, device.name
+        endpoint_keys = endpoint.endpoint_id.endpoint_uuid.uuid, endpoint.name
+        set_interfaces = []
+        for device_key in device_keys:
+            for endpoint_key in endpoint_keys:
+                endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]'.format(device_key, endpoint_key)
+                endpoint_settings = self.get(endpoint_settings_uri)
+                if endpoint_settings is None: continue
+                results = dump_subtree(endpoint_settings)
+                for res_key, res_value in results:
+                    if not res_key.startswith(endpoint_settings_uri): continue
+                    if 'set_interface' not in res_key: continue
+                    # append each entry once, and only when its subindex matches
+                    # the endpoint index carried by the rule itself
+                    setinterface_index = extract_index(res_value)
+                    if 'subindex[{:d}]'.format(setinterface_index) not in res_key: continue
+                    set_interfaces.append((res_key, res_value))
+        return set_interfaces
 
     def set(self, key_or_path : Union[str, List[str]], value : Any) -> None:
         set_subnode_value(self.__resolver, self.__config, key_or_path, value)
diff --git a/src/service/service/service_handler_api/Tools.py b/src/service/service/service_handler_api/Tools.py
index fa132909a..20a2fa301 100644
--- a/src/service/service/service_handler_api/Tools.py
+++ b/src/service/service/service_handler_api/Tools.py
@@ -61,16 +61,16 @@ def get_device_endpoint_uuids(endpoint : Tuple[str, str, Optional[str]]) -> Tupl
     return device_uuid, endpoint_uuid
 
 def extract_endpoint_index(endpoint_name : str, default_index=0) -> Tuple[str, int]:
-    RE_PATTERN = '^(eth\-[0-9]+(?:\/[0-9]+)*)(?:\.([0-9]+))?$'
+    RE_PATTERN = r'^(eth\-[0-9]+(?:\/[0-9]+)*)(?:\.([0-9]+))?$'
     m = re.match(RE_PATTERN, endpoint_name)
     if m is None: return endpoint_name, default_index
     endpoint_name, index = m.groups()
     if index is not None: index = int(index)
     return endpoint_name, index
 
-def extract_index(res_value : str) -> int:
-    acl_value = grpc_message_to_json(res_value,use_integers_for_enums=True)
-    endpoint = acl_value.split("'endpoint_uuid': {'uuid': '")
-    endpoint = endpoint[1].split("'}")
-    _ , index = extract_endpoint_index(endpoint[0])
+def extract_index(res_value : Any) -> int:
+    res_json = grpc_message_to_json(res_value, use_integers_for_enums=True)
+    endpoint_uuid = res_json['endpoint_id']['endpoint_uuid']['uuid']
+    # extract_endpoint_index() splits the optional '.<index>' suffix off the name
+    _, index = extract_endpoint_index(endpoint_uuid)
     return index
diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py
index 1aba88e30..bd0ba4441 100644
--- a/src/service/service/service_handlers/__init__.py
+++ b/src/service/service/service_handlers/__init__.py
@@ -30,6 +30,7 @@ from .p4_fabric_tna_int.p4_fabric_tna_int_service_handler import P4FabricINTServ
 from .p4_fabric_tna_l2_simple.p4_fabric_tna_l2_simple_service_handler import P4FabricL2SimpleServiceHandler
 from .p4_fabric_tna_l3.p4_fabric_tna_l3_service_handler import P4FabricL3ServiceHandler
 from .p4_fabric_tna_acl.p4_fabric_tna_acl_service_handler import P4FabricACLServiceHandler
+from .set_interface.Set_InterfaceServiceHandler import Set_InterfaceServiceHandler
 from .tapi_tapi.TapiServiceHandler import TapiServiceHandler
 from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler
 from .optical_tfs.OpticalTfsServiceHandler import OpticalTfsServiceHandler
@@ -178,5 +179,11 @@ SERVICE_HANDLERS = [
         FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L3NM,
         FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_RYU],
     }
+    ]),
+    (Set_InterfaceServiceHandler, [
+        {
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_SET_INTERFACE,
+            FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG],
+        }
     ])
 ]
diff --git a/src/service/service/service_handlers/set_interface/ConfigRules.py b/src/service/service/service_handlers/set_interface/ConfigRules.py
new file mode 100644
index 000000000..09087b434
--- /dev/null
+++ b/src/service/service/service_handlers/set_interface/ConfigRules.py
@@ -0,0 +1,59 @@
+
+# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any, Dict, List, Optional, Tuple
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from service.service.service_handler_api.AnyTreeTools import TreeNode
+LOGGER = logging.getLogger(__name__)
+
+def get_value(field_name : str, *containers, default=None) -> Optional[Any]:
+    if len(containers) == 0: raise Exception('No containers specified')
+    for container in containers:
+        if field_name not in container: continue
+        return container[field_name]
+    return default
+
+def setup_config_rules(
+    endpoint_name : str, endpoint_set_interface : List[Tuple]
+) -> List[Dict]:
+
+    json_config_rules = []
+
+    for res_key, res_value in endpoint_set_interface:
+        json_config_rules.append(
+            # action=1 corresponds to ConfigActionEnum.CONFIGACTION_SET
+            {'action': 1, 'set_interface': res_value}
+        )
+
+    return json_config_rules
+
+def teardown_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
+    service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+
+    if service_settings is None: return []
+    if device_settings is None: return []
+    if endpoint_settings is None: return []
+
+    json_settings : Dict = service_settings.value
+    json_device_settings : Dict = device_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
+
+    settings = (json_settings, json_endpoint_settings, json_device_settings)
+
+    json_config_rules = []
+    return json_config_rules
\ No newline at end of file
diff --git a/src/service/service/service_handlers/set_interface/Set_InterfaceServiceHandler.py b/src/service/service/service_handlers/set_interface/Set_InterfaceServiceHandler.py
new file mode 100644
index 000000000..b42c8d8e7
--- /dev/null
+++ b/src/service/service/service_handlers/set_interface/Set_InterfaceServiceHandler.py
@@ -0,0 +1,190 @@
+# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
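+
+# Illustrative sketch (an assumption for documentation, not enforced by this
+# module): the SET_INTERFACE config rules consumed below are expected to mirror
+# proto/set_interface.proto, stored by SettingsHandler under a key like
+# '/device[<dev>]/endpoint[<ep>]/subindex[<i>]/set_interface', e.g.:
+#
+#     {'action': 1,   # ConfigActionEnum.CONFIGACTION_SET
+#      'set_interface': {
+#          'endpoint_id': {...},  # device and endpoint UUIDs of the target port
+#          'rule_set'   : {'ip': '10.0.0.1', 'mask': '24', 'vlan': '100'}}}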
+
+import json, logging
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from .ConfigRules import setup_config_rules, teardown_config_rules
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'set_interface'})
+
+class Set_InterfaceServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : TaskExecutor, **settings
+    ) -> None:
+        self.__service = service
+        self.__task_executor = task_executor
+        self.__settings_handler = SettingsHandler(service.service_config, **settings)
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetEndpoint(
+        self, endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        LOGGER.debug("[SetEndpoint] Called with endpoints: %s", endpoints)
+        LOGGER.debug("[SetEndpoint] Connection UUID: %s", connection_uuid)
+
+        if len(endpoints) == 0:
+            LOGGER.warning("[SetEndpoint] No endpoints to process.")
+            return []
+
+        results = []
+        for endpoint in endpoints:
+            LOGGER.debug("[SetEndpoint] Processing endpoint tuple: %s", endpoint)
+            try:
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
+                LOGGER.debug("[SetEndpoint] Device UUID: %s | Endpoint UUID: %s", device_uuid, endpoint_uuid)
+
+                device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+
+                endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
+
+                endpoint_set_interface = self.__settings_handler.get_endpoint_set_interface(device_obj, endpoint_obj)
+                LOGGER.debug("[SetEndpoint] endpoint_set_interface: %s", endpoint_set_interface)
+
+                endpoint_name = endpoint_obj.name
+                LOGGER.debug("[SetEndpoint] Endpoint name: %s", endpoint_name)
+
+                json_config_rules = setup_config_rules(endpoint_name, endpoint_set_interface)
+                LOGGER.debug("[SetEndpoint] Generated json_config_rules: %s", json_config_rules)
+
+                if len(json_config_rules) > 0:
+                    LOGGER.info("[SetEndpoint] Applying %d config rules to device %s", len(json_config_rules), device_uuid)
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        LOGGER.debug("[SetEndpoint] Adding config rule: %s", json_config_rule)
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+
+                    self.__task_executor.configure_device(device_obj)
+                    LOGGER.info("[SetEndpoint] Configuration sent for device %s", device_uuid)
+                else:
+                    LOGGER.warning("[SetEndpoint] No config rules generated for endpoint %s", endpoint_uuid)
+
+                results.append(True)
+
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('[SetEndpoint] Unable to SetEndpoint(%s)', str(endpoint))
+                results.append(e)
+
+        LOGGER.debug("[SetEndpoint] Final results: %s", results)
+        return results
+
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        settings = self.__settings_handler.get('/settings')
+
+        results = []
+        for endpoint in endpoints:
+            try:
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
+
+                device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                device_settings = self.__settings_handler.get_device_settings(device_obj)
+                endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
+                endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
+
+                json_config_rules = teardown_config_rules(
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, device_settings, endpoint_settings)
+
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
+                results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                resource_value = json.loads(resource[1])
+                self.__settings_handler.set(resource[0], resource_value)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                self.__settings_handler.delete(resource[0])
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
\ No newline at end of file
diff --git a/src/service/service/service_handlers/set_interface/__init__.py b/src/service/service/service_handlers/set_interface/__init__.py
new file mode 100644
index 000000000..6242c89c7
--- /dev/null
+++ b/src/service/service/service_handlers/set_interface/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-- 
GitLab