diff --git a/deploy/all.sh b/deploy/all.sh
index a99607f5b907c2bd1e1b4b889bef881874a63967..6f5592cb43a5f214b2536226bb857629ad0c3cf0 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -25,14 +25,14 @@
 # By default, assume internal MicroK8s registry is used.
 export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
-# If not already set, set the list of components you want to build images for, and deploy.
+# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
 
-# If not already set, set the name of the Kubernetes namespace to deploy to.
+# If not already set, set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 
 # If not already set, set additional manifest files to be applied after the deployment
@@ -41,7 +41,7 @@ export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
 # If not already set, set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 
-# If not already set, disable skip-build flag.
+# If not already set, disable skip-build flag to rebuild the Docker images.
 # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
 export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
 
@@ -60,12 +60,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 # If not already set, set the database name to be used by Context.
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
-# If not already set, set the name of the secret where CockroachDB data and credentials will be stored.
-export CRDB_SECRET_NAME=${CRDB_SECRET_NAME:-"crdb-data"}
-
-# If not already set, set the namespace where the secret containing CockroachDB data and credentials will be stored.
-export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
-
 # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
 # "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
 # checking/deploying CockroachDB.
@@ -78,7 +72,7 @@ export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
 # Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html
 export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
 
-# If not already set, disable flag for dropping database if exists.
+# If not already set, disable flag for dropping database, if it exists.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
 # If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
 # checking/deploying CockroachDB.
@@ -96,12 +90,6 @@ export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""}
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
 
-# If not already set, set the name of the secret where NATS data and credentials will be stored.
-export NATS_SECRET_NAME=${NATS_SECRET_NAME:-"nats-data"}
-
-# If not already set, set the namespace where the secret containing NATS data and credentials will be stored.
-export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
-
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -113,20 +101,24 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
 # If not already set, set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
 
-# If not already set, set the database username to be used by Monitoring.
+# If not already set, set the database username to be used for QuestDB.
 export QDB_USERNAME=${QDB_USERNAME:-"admin"}
 
-# If not already set, set the database user's password to be used by Monitoring.
+# If not already set, set the database user's password to be used for QuestDB.
 export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
+
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
-## If not already set, disable flag for dropping table if exists.
-## WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while
-## checking/deploying QuestDB.
-#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""}
+# If not already set, disable flag for dropping tables if they exist.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION!
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed to by the variables
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while
+# checking/deploying QuestDB.
+export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
 # If not already set, disable flag for re-deploying QuestDB from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
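Note: the two new QDB_TABLE_* variables reach the components through the `qdb-data` secret created in `deploy/tfs.sh` (shown further down in this diff) and injected, e.g., into the Slice service via `envFrom`/`secretRef` in `manifests/sliceservice.yaml`. A minimal sketch of how a component can read them; the variable names match the `--from-literal` entries of the secret, while the fallback values are illustrative only:

```python
# Minimal sketch: reading the QuestDB settings injected through the 'qdb-data'
# secret. Names match the --from-literal entries in deploy/tfs.sh; the
# fallback values are illustrative, not authoritative.
import os

qdb_table_monitoring_kpis = os.environ.get('METRICSDB_TABLE_MONITORING_KPIS', 'tfs_monitoring_kpis')
qdb_table_slice_groups    = os.environ.get('METRICSDB_TABLE_SLICE_GROUPS',    'tfs_slice_groups')
qdb_username              = os.environ.get('METRICSDB_USERNAME', 'admin')
qdb_password              = os.environ.get('METRICSDB_PASSWORD', 'quest')

print(qdb_table_monitoring_kpis, qdb_table_slice_groups)
```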
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index 98d011f190196b803be27200b8bc348b30c87055..4e8cfe2c399fb0e943c90e5c585f93f0707ca835 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -66,9 +66,6 @@ CRDB_MANIFESTS_PATH="manifests/cockroachdb"
 # Create a tmp folder for files modified during the deployment
 TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
 mkdir -p $TMP_MANIFESTS_FOLDER
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
-mkdir -p $TMP_LOGS_FOLDER
-CRDB_LOG_FILE="$TMP_LOGS_FOLDER/crdb_deploy.log"
 
 function crdb_deploy_single() {
     echo "CockroachDB Namespace"
diff --git a/deploy/nats.sh b/deploy/nats.sh
index 115a185302236b80db385212cd772100392329af..9edbc7765a09135d62a6021c5f2b0669e36a69a4 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -31,14 +31,6 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
 # Automated steps start here
 ########################################################################################################################
 
-# Constants
-TMP_FOLDER="./tmp"
-NATS_MANIFESTS_PATH="manifests/nats"
-
-# Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
-mkdir -p $TMP_MANIFESTS_FOLDER
-
 function nats_deploy_single() {
     echo "NATS Namespace"
     echo ">>> Create NATS Namespace (if missing)"
diff --git a/deploy/qdb.sh b/deploy/qdb.sh
index d9a4de353b3309ef0a8a34310089e9bff31589fa..d94c000bf8d40c72faa255e7c6554926b6f683d3 100755
--- a/deploy/qdb.sh
+++ b/deploy/qdb.sh
@@ -21,20 +21,24 @@
 # If not already set, set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
 
-# If not already set, set the database username to be used by Monitoring.
+# If not already set, set the database username to be used for QuestDB.
 export QDB_USERNAME=${QDB_USERNAME:-"admin"}
 
-# If not already set, set the database user's password to be used by Monitoring.
+# If not already set, set the database user's password to be used for QuestDB.
 export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
+
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
-## If not already set, disable flag for dropping table if exists.
-## WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while
-## checking/deploying QuestDB.
-#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""}
+# If not already set, disable flag for dropping tables if they exist.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION!
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed to by the variables
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped
+# while checking/deploying QuestDB.
+export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
 # If not already set, disable flag for re-deploying QuestDB from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
@@ -52,9 +56,6 @@ QDB_MANIFESTS_PATH="manifests/questdb"
 
 # Create a tmp folder for files modified during the deployment
 TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
-QDB_LOG_FILE="$TMP_LOGS_FOLDER/qdb_deploy.log"
-mkdir -p $TMP_LOGS_FOLDER
 
 function qdb_deploy() {
     echo "QuestDB Namespace"
@@ -147,19 +148,20 @@ function qdb_undeploy() {
     echo
 }
 
-# TODO: implement method to drop table
-#function qdb_drop_table() {
-#    echo "Drop table if exists"
-#    QDB_CLIENT_URL="postgresql://${QDB_USERNAME}:${QDB_PASSWORD}@questdb-0:${QDB_SQL_PORT}/defaultdb?sslmode=require"
-#    kubectl exec -it --namespace ${QDB_NAMESPACE} questdb-0 -- \
-#        ./qdb sql --certs-dir=/qdb/qdb-certs --url=${QDB_CLIENT_URL} \
-#        --execute "DROP TABLE IF EXISTS ${QDB_TABLE};"
-#    echo
-#}
+function qdb_drop_tables() {
+    QDB_HOST=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.clusterIP}')
+    QDB_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+
+    echo "Drop tables, if they exist"
+    curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;"
+    echo
+    curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;"
+    echo
+}
 
 if [ "$QDB_REDEPLOY" == "YES" ]; then
     qdb_undeploy
-#elif [ "$QDB_DROP_TABLE_IF_EXISTS" == "YES" ]; then
-#    qdb_drop_table
+elif [ "$QDB_DROP_TABLES_IF_EXIST" == "YES" ]; then
+    qdb_drop_tables
 fi
 
 qdb_deploy
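For reference, `qdb_drop_tables` replaces the old (never-implemented) SQL-over-kubectl approach with QuestDB's HTTP `/exec` endpoint, which accepts the query as a URL parameter. A hedged Python equivalent of the same calls; host and port are placeholders for the `questdb-public` Service values the script resolves with kubectl:

```python
# Sketch of the DROP TABLE calls issued by qdb_drop_tables(), using QuestDB's
# HTTP /exec endpoint. QDB_HOST/QDB_PORT are placeholders; in-cluster they are
# resolved from the questdb-public Service, as in the script above.
import requests

QDB_HOST, QDB_PORT = '127.0.0.1', 9000  # assumption for local testing

def drop_table(table_name: str) -> None:
    response = requests.get(
        f'http://{QDB_HOST}:{QDB_PORT}/exec',
        params={'fmt': 'json', 'query': f'DROP TABLE IF EXISTS {table_name};'})
    response.raise_for_status()

drop_table('tfs_monitoring_kpis')
drop_table('tfs_slice_groups')
```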
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index b9bcbab4d8084e30aae90be3cf669445d01c0dac..16cf5c13bd4532aac0267b7904c6c403d7ac057c 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -18,18 +18,21 @@
 # Read deployment settings
 ########################################################################################################################
 
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
 # If not already set, set the URL of the Docker registry where the images will be uploaded to.
 # By default, assume internal MicroK8s registry is used.
 export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
-# If not already set, set the list of components you want to build images for, and deploy.
+# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
 
-# If not already set, set the name of the Kubernetes namespace to deploy to.
+# If not already set, set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 
 # If not already set, set additional manifest files to be applied after the deployment
@@ -38,10 +41,13 @@ export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
 # If not already set, set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 
-# If not already set, disable skip-build flag.
+# If not already set, disable skip-build flag to rebuild the Docker images.
 # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
 export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
 
+
+# ----- CockroachDB ------------------------------------------------------------
+
 # If not already set, set the namespace where CockroackDB will be deployed.
 export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
 
@@ -54,20 +60,29 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 # If not already set, set the database name to be used by Context.
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
+
+# ----- NATS -------------------------------------------------------------------
+
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
 
+
+# ----- QuestDB ----------------------------------------------------------------
+
 # If not already set, set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
 
-# If not already set, set the database username to be used by Monitoring.
+# If not already set, set the database username to be used for QuestDB.
 export QDB_USERNAME=${QDB_USERNAME:-"admin"}
 
-# If not already set, set the database user's password to be used by Monitoring.
+# If not already set, set the database user's password to be used for QuestDB.
 export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
+
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
 
 ########################################################################################################################
@@ -85,7 +100,7 @@ TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
 mkdir -p $TMP_LOGS_FOLDER
 
 echo "Deleting and Creating a new namespace..."
-kubectl delete namespace $TFS_K8S_NAMESPACE
+kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
 kubectl create namespace $TFS_K8S_NAMESPACE
 printf "\n"
 
@@ -118,7 +133,8 @@ kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='
     --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \
     --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \
     --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \
-    --from-literal=METRICSDB_TABLE=${QDB_TABLE} \
+    --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \
+    --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \
     --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \
     --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD}
 printf "\n"
 
@@ -301,28 +317,34 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     # Configure Grafana Admin Password
     # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
     GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}"
-    echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
+
+    echo ">> Updating Grafana 'admin' password..."
     curl -X PUT -H "Content-Type: application/json" -d '{
         "oldPassword": "'${GRAFANA_PASSWORD}'",
         "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
         "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
     }' ${GRAFANA_URL_DEFAULT}/api/user/password
     echo
+    echo
 
     # Updated Grafana API URL
     GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}"
     echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
 
+    echo ">> Installing Scatter Plot plugin..."
+    curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \
+        ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install
+    echo
+
     # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
-    # TODO: replace user, password and database by variables to be saved
     QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}"
-    echo "Creating a datasource..."
+    echo ">> Creating datasources..."
     curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
         "access" : "proxy",
         "type" : "postgres",
-        "name" : "questdb",
+        "name" : "questdb-mon-kpi",
         "url" : "'${QDB_HOST_PORT}'",
-        "database" : "'${QDB_TABLE}'",
+        "database" : "'${QDB_TABLE_MONITORING_KPIS}'",
         "user" : "'${QDB_USERNAME}'",
         "basicAuth": false,
         "isDefault": true,
@@ -342,16 +364,51 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     }' ${GRAFANA_URL_UPDATED}/api/datasources
     echo
 
-    # Create Monitoring Dashboard
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access" : "proxy",
+        "type" : "postgres",
+        "name" : "questdb-slc-grp",
+        "url" : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_SLICE_GROUPS}'",
+        "user" : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "sslmode" : "disable",
+            "postgresVersion" : 1100,
+            "maxOpenConns" : 0,
+            "maxIdleConns" : 2,
+            "connMaxLifetime" : 14400,
+            "tlsAuth" : false,
+            "tlsAuthWithCACert" : false,
+            "timescaledb" : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify" : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
+
+    echo ">> Creating dashboards..."
     # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
-    curl -X POST -H "Content-Type: application/json" \
-        -d '@src/webui/grafana_dashboard_psql.json' \
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
         ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    printf "\n\n"
+
+    echo ">> Starring dashboards..."
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
 
-    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tf-l3-monit"
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
     DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
 
     printf "\n\n"
 fi
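The starring logic above first resolves each dashboard's numeric id from its uid and then stars it for the user. A hedged Python rendering of those two Grafana HTTP API calls; the URL and credentials are placeholders, while the API paths are exactly the ones used by the script:

```python
# Python rendering of the dashboard-starring calls in deploy/tfs.sh. The
# Grafana URL/credentials are placeholders; the API paths match the script.
import requests

GRAFANA_URL_UPDATED = 'http://admin:admin123+@127.0.0.1:3000'  # placeholder

def star_dashboard(dashboard_uid: str) -> None:
    # Resolve the numeric dashboard id from its uid, then star it.
    dashboard = requests.get(f'{GRAFANA_URL_UPDATED}/api/dashboards/uid/{dashboard_uid}').json()
    dashboard_id = dashboard['dashboard']['id']
    requests.post(f'{GRAFANA_URL_UPDATED}/api/user/stars/dashboard/{dashboard_id}').raise_for_status()

star_dashboard('tfs-l3-monit')    # Monitoring KPIs dashboard
star_dashboard('tfs-slice-grps')  # Slice groups dashboard
```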
diff --git a/hackfest/mock_osm/__main__.py b/hackfest/mock_osm/__main__.py
index 669da2b5e6a1729f35d2958f2d7aa68c0413287d..4ed25eaedbf4eba1f04ea41c72a751ecd7d6380b 100644
--- a/hackfest/mock_osm/__main__.py
+++ b/hackfest/mock_osm/__main__.py
@@ -58,13 +58,11 @@ SERVICE_CONNECTION_POINTS = [
 class MockOSMShell(cmd.Cmd):
     intro = 'Welcome to the MockOSM shell.\nType help or ? to list commands.\n'
     prompt = '(mock-osm) '
-    file = None
 
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, **kwargs)
         self.mock_osm = MockOSM(WIM_URL, WIM_PORT_MAPPING, WIM_USERNAME, WIM_PASSWORD)
 
-    # ----- basic turtle commands -----
     def do_create(self, arg):
         'Create an ELINE (L2) service'
         service_uuid = self.mock_osm.create_connectivity_service(
diff --git a/hackfest/tfs-descriptors/device-all.json b/hackfest/tfs-descriptors/device-all.json
index 8cb8e031488f0dd1fa4176b8d20d01fe2d24abc9..36a93fe98d253dbc5e6db7f91b3b890e529c2ffc 100644
--- a/hackfest/tfs-descriptors/device-all.json
+++ b/hackfest/tfs-descriptors/device-all.json
@@ -9,7 +9,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false,
             "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
@@ -26,7 +26,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false,
             "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
@@ -43,7 +43,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false,
             "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
@@ -60,7 +60,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false,
             "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
diff --git a/hackfest/tfs-descriptors/device-netconf-openconfig.json b/hackfest/tfs-descriptors/device-netconf-openconfig.json
index 7e01f037e744493a8cd1190b2510ed3d4d1c86aa..490e36efde5b428781b945ccc5060eb7b29a558a 100644
--- a/hackfest/tfs-descriptors/device-netconf-openconfig.json
+++ b/hackfest/tfs-descriptors/device-netconf-openconfig.json
@@ -9,7 +9,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false, "hostkey_verify": false,
             "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 15}
         }}}
     ]},
diff --git a/hackfest/tfs-descriptors/old/device.json b/hackfest/tfs-descriptors/old/device.json
index 03736314dee9ea0a8aae27627361dcdd24457fca..abe529e729955b8048c00fd688ce1d1a8b5a0285 100644
--- a/hackfest/tfs-descriptors/old/device.json
+++ b/hackfest/tfs-descriptors/old/device.json
@@ -27,7 +27,7 @@
     "device_config": {"config_rules": [
         {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.15"}},
         {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8301"}},
-        {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"delete_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
+        {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"commit_per_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
     ]},
     "device_operational_status": 1,
     "device_drivers": [1],
@@ -39,7 +39,7 @@
     "device_config": {"config_rules": [
         {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.15"}},
         {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8302"}},
-        {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"delete_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
+        {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"commit_per_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
     ]},
     "device_operational_status": 1,
     "device_drivers": [1],
@@ -51,7 +51,7 @@
     "device_config": {"config_rules": [
         {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.15"}},
         {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8303"}},
-        {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"delete_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
+        {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"commit_per_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
\"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"commit_per_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}} ]}, "device_operational_status": 1, "device_drivers": [1], diff --git a/hackfest/tfs-descriptors/old/service.json b/hackfest/tfs-descriptors/old/service.json index a25d0171dbfdbf174a877151201752c76759514a..26804dcf133fa6c83be70a72374b0f19435d24d6 100644 --- a/hackfest/tfs-descriptors/old/service.json +++ b/hackfest/tfs-descriptors/old/service.json @@ -18,8 +18,8 @@ {"device_id":{"device_uuid":{"uuid":"R2"}},"endpoint_uuid":{"uuid":"1/3"}} ], "service_constraints":[ - {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, - {"custom": {"constraint_type": "latency[ms]", "constraint_value": "20.0"}} + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 20.0}} ], "service_config":{"config_rules":[]} } diff --git a/hackfest/tfs-descriptors/service-l3vpn.json b/hackfest/tfs-descriptors/service-l3vpn.json index 457ba1a509aebc5eaea8caa37a09ac62ef286f32..723453b8b3d43a56386e15dec6f70fc368bca517 100644 --- a/hackfest/tfs-descriptors/service-l3vpn.json +++ b/hackfest/tfs-descriptors/service-l3vpn.json @@ -12,8 +12,8 @@ {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "1/2"}} ], "service_constraints": [ - {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, - {"custom": {"constraint_type": "latency[ms]", "constraint_value": "15.2"}} + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 15.2}} ], "service_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "/settings", "resource_value": { diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index 447f6a1c77cc6862db3df3e83b73add3257a5c0d..49e2b5943d20586941f80e8fc4b5c32c99d70f8e 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -37,6 +37,11 @@ spec: env: - name: LOG_LEVEL value: "INFO" + - name: SLICE_GROUPING + value: "DISABLE" + envFrom: + - secretRef: + name: qdb-data readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:4040"] diff --git a/my_deploy.sh b/my_deploy.sh index 6f0e64afe311b8e56446caabfac6329024c207a9..518b90f280a0d885169e00ce2fc728ca01f4635a 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -56,7 +56,7 @@ export CRDB_DATABASE="tfs" # See ./deploy/all.sh or ./deploy/crdb.sh for additional details export CRDB_DEPLOY_MODE="single" -# Disable flag for dropping database, if exists. +# Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. @@ -74,20 +74,23 @@ export NATS_REDEPLOY="" # ----- QuestDB ---------------------------------------------------------------- -# If not already set, set the namespace where QuestDB will be deployed. +# Set the namespace where QuestDB will be deployed. export QDB_NAMESPACE="qdb" -# If not already set, set the database username to be used by Monitoring. +# Set the database username to be used for QuestDB. export QDB_USERNAME="admin" -# If not already set, set the database user's password to be used by Monitoring. +# Set the database user's password to be used for QuestDB. export QDB_PASSWORD="quest" -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE="tfs_monitoring" +# Set the table name to be used by Monitoring for KPIs. 
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 447f6a1c77cc6862db3df3e83b73add3257a5c0d..49e2b5943d20586941f80e8fc4b5c32c99d70f8e 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -37,6 +37,11 @@ spec:
         env:
         - name: LOG_LEVEL
           value: "INFO"
+        - name: SLICE_GROUPING
+          value: "DISABLE"
+        envFrom:
+        - secretRef:
+            name: qdb-data
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:4040"]
diff --git a/my_deploy.sh b/my_deploy.sh
index 6f0e64afe311b8e56446caabfac6329024c207a9..518b90f280a0d885169e00ce2fc728ca01f4635a 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -56,7 +56,7 @@ export CRDB_DATABASE="tfs"
 # See ./deploy/all.sh or ./deploy/crdb.sh for additional details
 export CRDB_DEPLOY_MODE="single"
 
-# Disable flag for dropping database, if exists.
+# Disable flag for dropping database, if it exists.
 export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
@@ -74,20 +74,23 @@ export NATS_REDEPLOY=""
 
 # ----- QuestDB ----------------------------------------------------------------
 
-# If not already set, set the namespace where QuestDB will be deployed.
+# Set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE="qdb"
 
-# If not already set, set the database username to be used by Monitoring.
+# Set the database username to be used for QuestDB.
 export QDB_USERNAME="admin"
 
-# If not already set, set the database user's password to be used by Monitoring.
+# Set the database user's password to be used for QuestDB.
 export QDB_PASSWORD="quest"
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE="tfs_monitoring"
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
 
-## If not already set, disable flag for dropping table if exists.
-#export QDB_DROP_TABLE_IF_EXISTS=""
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
 
-# If not already set, disable flag for re-deploying QuestDB from scratch.
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
diff --git a/proto/context.proto b/proto/context.proto
index e403c4a22f2df62f695041c094cc1c6e6a193d5f..49d16229cdac5de84f25cfaa7d196d25184f46f0 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -509,6 +509,7 @@ message Constraint_SLA_Capacity {
 message Constraint_SLA_Availability {
   uint32 num_disjoint_paths = 1;
   bool all_active = 2;
+  float availability = 3; // 0.0 .. 100.0 percentage of availability
 }
 
 enum IsolationLevelEnum {
diff --git a/proto/load_generator.proto b/proto/load_generator.proto
index 98f6eefda88db7abac4651857326952789a879ba..86f9469588f1586da5339edad198e39e82598cde 100644
--- a/proto/load_generator.proto
+++ b/proto/load_generator.proto
@@ -18,6 +18,36 @@ package load_generator;
 import "context.proto";
 
 service LoadGeneratorService {
-  rpc Start(context.Empty) returns (context.Empty) {}
-  rpc Stop (context.Empty) returns (context.Empty) {}
+  rpc Start    (Parameters   ) returns (context.Empty) {}
+  rpc GetStatus(context.Empty) returns (Status       ) {}
+  rpc Stop     (context.Empty) returns (context.Empty) {}
+}
+
+enum RequestTypeEnum {
+  REQUESTTYPE_UNDEFINED    = 0;
+  REQUESTTYPE_SERVICE_L2NM = 1;
+  REQUESTTYPE_SERVICE_L3NM = 2;
+  REQUESTTYPE_SERVICE_MW   = 3;
+  REQUESTTYPE_SERVICE_TAPI = 4;
+  REQUESTTYPE_SLICE_L2NM   = 5;
+  REQUESTTYPE_SLICE_L3NM   = 6;
+}
+
+message Parameters {
+  uint64 num_requests = 1; // if == 0, generate infinite requests
+  repeated RequestTypeEnum request_types = 2;
+  float offered_load = 3;
+  float holding_time = 4;
+  float inter_arrival_time = 5;
+  bool do_teardown = 6;
+  bool dry_mode = 7;
+  bool record_to_dlt = 8;
+  string dlt_domain_id = 9;
+}
+
+message Status {
+  Parameters parameters = 1;
+  uint64 num_generated = 2;
+  bool infinite_loop = 3;
+  bool running = 4;
 }
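With the extended proto, the load generator can now be parameterized and inspected at runtime. A hedged sketch of a Python client; the generated module names follow the `common.proto` convention used elsewhere in this diff, and the service address is a placeholder:

```python
# Sketch of driving the extended LoadGeneratorService. Generated module names
# follow the common.proto convention seen in this diff; address is a placeholder.
import grpc
from common.proto.context_pb2 import Empty
from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub

channel = grpc.insecure_channel('127.0.0.1:50052')  # placeholder address
stub = LoadGeneratorServiceStub(channel)

parameters = Parameters(
    num_requests=100,  # 0 would mean: generate requests indefinitely
    request_types=[
        RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM,
        RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM,
    ],
    offered_load=50.0, holding_time=10.0, do_teardown=True,
)
stub.Start(parameters)

status = stub.GetStatus(Empty())
print(status.num_generated, status.running)
```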
diff --git a/scripts/show_logs_load_generator.sh b/scripts/show_logs_load_generator.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d0f2527d74840d48a10e0ec7ba018f513eea2c52
--- /dev/null
+++ b/scripts/show_logs_load_generator.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/load-generatorservice
diff --git a/src/automation/target/kubernetes/kubernetes.yml b/src/automation/target/kubernetes/kubernetes.yml
index c6c61db72ec7d289a41a5fe21cecc940c954c3c1..4dacf3998c3991a441dc374ca6c6abc29e8d3b80 100644
--- a/src/automation/target/kubernetes/kubernetes.yml
+++ b/src/automation/target/kubernetes/kubernetes.yml
@@ -1,4 +1,4 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/src/common/Constants.py b/src/common/Constants.py
index c0b4cbf0511884148de34fdd891a256796d7d26a..a7bf198a7204677ed3669fc28a2c3528a5936425 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -83,7 +83,6 @@ DEFAULT_SERVICE_HTTP_PORTS = {
 
 # Default HTTP/REST-API service base URLs
 DEFAULT_SERVICE_HTTP_BASEURLS = {
-    ServiceNameEnum.CONTEXT .value : '/api',
-    ServiceNameEnum.COMPUTE .value : '/restconf/data',
+    ServiceNameEnum.COMPUTE .value : '/restconf',
     ServiceNameEnum.WEBUI   .value : None,
 }
diff --git a/src/common/method_wrappers/tests/deploy_specs.sh b/src/common/method_wrappers/tests/deploy_specs.sh
index 571990ecabfbf120b517f44fd99b4550a4b8a9a1..1f41d2348e5a2d60c816071ef3414df281caeaaa 100755
--- a/src/common/method_wrappers/tests/deploy_specs.sh
+++ b/src/common/method_wrappers/tests/deploy_specs.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#      http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,21 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-# Supported components are:
-#     context device automation policy service compute monitoring webui
-#     interdomain slice pathcomp dlt
-#     dbscanserving opticalattackmitigator opticalattackdetector
-#     l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
-export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute dlt
+export TFS_COMPONENTS="context device pathcomp service slice webui load_generator"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
@@ -35,6 +34,63 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicem
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
-export TFS_SKIP_BUILD="NO" #${TFS_SKIP_BUILD:-"YES"}
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS=""
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
diff --git a/src/common/tests/LoadScenario.py b/src/common/tests/LoadScenario.py
deleted file mode 100644
index 93cf3708cfc5f8a4296a5cb68772984beefd7563..0000000000000000000000000000000000000000
--- a/src/common/tests/LoadScenario.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.client.ServiceClient import ServiceClient
-from slice.client.SliceClient import SliceClient
-
-LOGGER = logging.getLogger(__name__)
-LOGGERS = {
-    'success': LOGGER.info,
-    'danger' : LOGGER.error,
-    'error'  : LOGGER.error,
-}
-
-def load_scenario_from_descriptor(
-    descriptor_file : str, context_client : ContextClient, device_client : DeviceClient,
-    service_client : ServiceClient, slice_client : SliceClient
-) -> DescriptorLoader:
-    with open(descriptor_file, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(
-        descriptors,
-        context_client=context_client, device_client=device_client,
-        service_client=service_client, slice_client=slice_client)
-    results = descriptor_loader.process()
-
-    num_errors = 0
-    for message,level in compose_notifications(results):
-        LOGGERS.get(level)(message)
-        if level != 'success': num_errors += 1
-    if num_errors > 0:
-        MSG = 'Failed to load descriptors in file {:s}'
-        raise Exception(MSG.format(str(descriptor_file)))
-
-    return descriptor_loader
\ No newline at end of file
diff --git a/src/common/tools/context_queries/Context.py b/src/common/tools/context_queries/Context.py
index d28ca3991fe7de0cdf9d069db413ff528ace4335..a627b9ba5828d31caca8332d7241d28e126895d3 100644
--- a/src/common/tools/context_queries/Context.py
+++ b/src/common/tools/context_queries/Context.py
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.proto.context_pb2 import Context, Empty
+import grpc
+from typing import Optional
+from common.proto.context_pb2 import Context, ContextId, Empty
 from common.tools.object_factory.Context import json_context
 from context.client.ContextClient import ContextClient
 
@@ -23,3 +25,17 @@ def create_context(
     existing_context_uuids = {context_id.context_uuid.uuid for context_id in existing_context_ids.context_ids}
     if context_uuid in existing_context_uuids: return
     context_client.SetContext(Context(**json_context(context_uuid)))
+
+def get_context(context_client : ContextClient, context_uuid : str, rw_copy : bool = False) -> Optional[Context]:
+    try:
+        # pylint: disable=no-member
+        context_id = ContextId()
+        context_id.context_uuid.uuid = context_uuid
+        ro_context = context_client.GetContext(context_id)
+        if not rw_copy: return ro_context
+        rw_context = Context()
+        rw_context.CopyFrom(ro_context)
+        return rw_context
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get Context({:s})'.format(str(context_uuid)))
+        return None
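A usage sketch for the new `get_context()` helper; the `get_link()` helper added to `context_queries/Link.py` below follows the same pattern. The context UUID and the bare `ContextClient()` construction are illustrative:

```python
# Usage sketch for get_context(); 'admin' is an illustrative context UUID.
from common.tools.context_queries.Context import get_context
from context.client.ContextClient import ContextClient

context_client = ContextClient()
context = get_context(context_client, 'admin', rw_copy=False)
if context is None:
    print('Context not found')  # get_context() swallows grpc.RpcError and returns None
else:
    # rw_copy=True would instead return a detached, modifiable copy.
    print(context.context_id.context_uuid.uuid)
```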
diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py
index 7317cc793f5dd46e6a9f741bf259635a5bd0462f..edb640708b17b6734fbde6d759db5a2cdea692ec 100644
--- a/src/common/tools/context_queries/InterDomain.py
+++ b/src/common/tools/context_queries/InterDomain.py
@@ -136,13 +136,11 @@ def compute_interdomain_path(
         service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add()
         service_endpoint_id.CopyFrom(endpoint_id)
 
-    constraint_bw = pathcomp_req_svc.service_constraints.add()
-    constraint_bw.custom.constraint_type = 'bandwidth[gbps]'
-    constraint_bw.custom.constraint_value = '10.0'
+    constraint_sla_capacity = pathcomp_req_svc.service_constraints.add()
+    constraint_sla_capacity.sla_capacity.capacity_gbps = 10.0
 
-    constraint_lat = pathcomp_req_svc.service_constraints.add()
-    constraint_lat.custom.constraint_type = 'latency[ms]'
-    constraint_lat.custom.constraint_value = '100.0'
+    constraint_sla_latency = pathcomp_req_svc.service_constraints.add()
+    constraint_sla_latency.sla_latency.e2e_latency_ms = 100.0
 
     LOGGER.debug('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req)))
     pathcomp_rep = pathcomp_client.Compute(pathcomp_req)
diff --git a/src/common/tools/context_queries/Link.py b/src/common/tools/context_queries/Link.py
index 83a878bde85ddfe25bc345ed987670164bacf2c6..291cdcf375d942b72008daea5c2c5ff357a994ef 100644
--- a/src/common/tools/context_queries/Link.py
+++ b/src/common/tools/context_queries/Link.py
@@ -12,11 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Set
-from common.proto.context_pb2 import ContextId, Empty, Link, Topology, TopologyId
+import grpc
+from typing import List, Optional, Set
+from common.proto.context_pb2 import ContextId, Empty, Link, LinkId, Topology, TopologyId
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 
+def get_link(context_client : ContextClient, link_uuid : str, rw_copy : bool = False) -> Optional[Link]:
+    try:
+        # pylint: disable=no-member
+        link_id = LinkId()
+        link_id.link_uuid.uuid = link_uuid
+        ro_link = context_client.GetLink(link_id)
+        if not rw_copy: return ro_link
+        rw_link = Link()
+        rw_link.CopyFrom(ro_link)
+        return rw_link
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get Link({:s})'.format(str(link_uuid)))
+        return None
+
 def get_existing_link_uuids(context_client : ContextClient) -> Set[str]:
     existing_link_ids = context_client.ListLinkIds(Empty())
     existing_link_uuids = {link_id.link_uuid.uuid for link_id in existing_link_ids.link_ids}
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index fc3b008b4004efe5afc270da65246c4635c777c3..0e1d8c7371e87b47bfc47a4242e00039add48e7f 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -15,25 +15,30 @@
 # SDN controller descriptor loader
 
 # Usage example (WebUI):
-#    descriptors = json.loads(descriptors_data_from_client)
-#    descriptor_loader = DescriptorLoader(descriptors)
+#    descriptors = json.loads(descriptors_data_from_client)
+#    descriptor_loader = DescriptorLoader(
+#        descriptors=descriptors, num_workers=10,
+#        context_client=..., device_client=..., service_client=..., slice_client=...)
 #    results = descriptor_loader.process()
 #    for message,level in compose_notifications(results):
 #        flash(message, level)
 
 # Usage example (pytest):
-#    with open('path/to/descriptor.json', 'r', encoding='UTF-8') as f:
-#        descriptors = json.loads(f.read())
 #    descriptor_loader = DescriptorLoader(
-#        descriptors, context_client=..., device_client=..., service_client=..., slice_client=...)
+#        descriptors_file='path/to/descriptor.json', num_workers=10,
+#        context_client=..., device_client=..., service_client=..., slice_client=...)
 #    results = descriptor_loader.process()
-#    loggers = {'success': LOGGER.info, 'danger': LOGGER.error, 'error': LOGGER.error}
-#    for message,level in compose_notifications(results):
-#        loggers.get(level)(message)
-
-import json
-from typing import Dict, List, Optional, Tuple, Union
-from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology
+#    check_descriptor_load_results(results, descriptor_loader)
+#    descriptor_loader.validate()
+#    # do test ...
+#    descriptor_loader.unload()
+
+import concurrent.futures, json, logging, operator
+from typing import Any, Dict, List, Optional, Tuple, Union
+from common.proto.context_pb2 import (
+    Connection, Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Service, ServiceId, Slice, SliceId,
+    Topology, TopologyId)
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
@@ -43,6 +48,13 @@ from .Tools import (
     get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices,
     get_descriptors_add_topologies, split_devices_by_rules)
 
+LOGGER = logging.getLogger(__name__)
+LOGGERS = {
+    'success': LOGGER.info,
+    'danger' : LOGGER.error,
+    'error'  : LOGGER.error,
+}
+
 ENTITY_TO_TEXT = {
     # name   => singular,    plural
     'context'  : ('Context',  'Contexts' ),
@@ -65,25 +77,26 @@ TypeResults = List[Tuple[str, str, int, List[str]]] # entity_name, action, num_o
 TypeNotification = Tuple[str, str] # message, level
 TypeNotificationList = List[TypeNotification]
 
-def compose_notifications(results : TypeResults) -> TypeNotificationList:
-    notifications = []
-    for entity_name, action_name, num_ok, error_list in results:
-        entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name]
-        action_infinitive, action_past = ACTION_TO_TEXT[action_name]
-        num_err = len(error_list)
-        for error in error_list:
-            notifications.append((f'Unable to {action_infinitive} {entity_name_singluar} {error}', 'error'))
-        if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success'))
-        if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger'))
-    return notifications
-
 class DescriptorLoader:
     def __init__(
-        self, descriptors : Union[str, Dict],
+        self, descriptors : Optional[Union[str, Dict]] = None, descriptors_file : Optional[str] = None,
+        num_workers : int = 1,
         context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None,
         service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None
     ) -> None:
-        self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors
+        if (descriptors is None) == (descriptors_file is None):
+            raise Exception('Exactly one of "descriptors" or "descriptors_file" is required')
+
+        if descriptors_file is not None:
+            with open(descriptors_file, 'r', encoding='UTF-8') as f:
+                self.__descriptors = json.loads(f.read())
+            self.__descriptor_file_path = descriptors_file
+        else: # descriptors is not None
+            self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors
+            self.__descriptor_file_path = ''
+
+        self.__num_workers = num_workers
+
         self.__dummy_mode  = self.__descriptors.get('dummy_mode' , False)
         self.__contexts    = self.__descriptors.get('contexts'   , [])
         self.__topologies  = self.__descriptors.get('topologies' , [])
@@ -107,6 +120,24 @@ class DescriptorLoader:
 
         self.__results : TypeResults = list()
 
+    @property
+    def descriptor_file_path(self) -> Optional[str]: return self.__descriptor_file_path
+
+    @property
+    def num_workers(self) -> int: return self.__num_workers
+
+    @property
+    def context_client(self) -> Optional[ContextClient]: return self.__ctx_cli
+
+    @property
+    def device_client(self) -> Optional[DeviceClient]: return self.__dev_cli
+
+    @property
+    def service_client(self) -> Optional[ServiceClient]: return self.__svc_cli
+
+    @property
+    def slice_client(self) -> Optional[SliceClient]: return self.__slc_cli
+
     @property
     def contexts(self) -> List[Dict]: return self.__contexts
 
@@ -242,12 +273,108 @@ class DescriptorLoader:
         #self.__dev_cli.close()
         #self.__ctx_cli.close()
 
+    @staticmethod
+    def worker(grpc_method, grpc_class, entity) -> Any:
+        return grpc_method(grpc_class(**entity))
+
     def _process_descr(self, entity_name, action_name, grpc_method, grpc_class, entities) -> None:
         num_ok, error_list = 0, []
-        for entity in entities:
-            try:
-                grpc_method(grpc_class(**entity))
-                num_ok += 1
-            except Exception as e: # pylint: disable=broad-except
-                error_list.append(f'{str(entity)}: {str(e)}')
+
+        with concurrent.futures.ThreadPoolExecutor(max_workers=self.__num_workers) as executor:
+            future_to_entity = {
+                executor.submit(DescriptorLoader.worker, grpc_method, grpc_class, entity): (i, entity)
+                for i,entity in enumerate(entities)
+            }
+
+            for future in concurrent.futures.as_completed(future_to_entity):
+                i, entity = future_to_entity[future]
+                try:
+                    _ = future.result()
+                    num_ok += 1
+                except Exception as e: # pylint: disable=broad-except
+                    error_list.append((i, f'{str(entity)}: {str(e)}'))
+
+        error_list = [str_error for _,str_error in sorted(error_list, key=operator.itemgetter(0))]
         self.__results.append((entity_name, action_name, num_ok, error_list))
+
+    def validate(self) -> None:
+        self.__ctx_cli.connect()
+
+        contexts = self.__ctx_cli.ListContexts(Empty())
+        assert len(contexts.contexts) == self.num_contexts
+
+        for context_uuid, num_topologies in self.num_topologies.items():
+            response = self.__ctx_cli.ListTopologies(ContextId(**json_context_id(context_uuid)))
+            assert len(response.topologies) == num_topologies
+
+        response = self.__ctx_cli.ListDevices(Empty())
+        assert len(response.devices) == self.num_devices
+
+        response = self.__ctx_cli.ListLinks(Empty())
+        assert len(response.links) == self.num_links
+
+        for context_uuid, num_services in self.num_services.items():
+            response = self.__ctx_cli.ListServices(ContextId(**json_context_id(context_uuid)))
+            assert len(response.services) == num_services
+
+        for context_uuid, num_slices in self.num_slices.items():
+            response = self.__ctx_cli.ListSlices(ContextId(**json_context_id(context_uuid)))
+            assert len(response.slices) == num_slices
+
+    def unload(self) -> None:
+        self.__ctx_cli.connect()
+        self.__dev_cli.connect()
+        self.__svc_cli.connect()
+        self.__slc_cli.connect()
+
+        for _, slice_list in self.slices.items():
+            for slice_ in slice_list:
+                self.__slc_cli.DeleteSlice(SliceId(**slice_['slice_id']))
+
+        for _, service_list in self.services.items():
+            for service in service_list:
+                self.__svc_cli.DeleteService(ServiceId(**service['service_id']))
+
+        for link in self.links:
+            self.__ctx_cli.RemoveLink(LinkId(**link['link_id']))
+
+        for device in self.devices:
+            self.__dev_cli.DeleteDevice(DeviceId(**device['device_id']))
+
+        for _, topology_list in self.topologies.items():
+            for topology in topology_list:
+                self.__ctx_cli.RemoveTopology(TopologyId(**topology['topology_id']))
+
+        for context in self.contexts:
+            self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
+
+def compose_notifications(results : TypeResults) -> TypeNotificationList:
+    notifications = []
+    for entity_name, action_name, num_ok, error_list in results:
+        entity_name_singular,entity_name_plural = ENTITY_TO_TEXT[entity_name]
+        action_infinitive, action_past = ACTION_TO_TEXT[action_name]
+        num_err = len(error_list)
+        for error in error_list:
+            notifications.append((f'Unable to {action_infinitive} {entity_name_singular} {error}', 'error'))
+        if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success'))
+        if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger'))
+    return notifications
+
+def check_descriptor_load_results(results : TypeResults, descriptor_loader : DescriptorLoader) -> None:
+    num_errors = 0
+    for message,level in compose_notifications(results):
+        LOGGERS.get(level)(message)
+        if level != 'success': num_errors += 1
+    if num_errors > 0:
+        MSG = 'Failed to load descriptors from "{:s}"'
+        raise Exception(MSG.format(str(descriptor_loader.descriptor_file_path)))
+
+def validate_empty_scenario(context_client : ContextClient) -> None:
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 0
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 0
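Taken together, the reworked `DescriptorLoader` enables a compact test workflow, mirroring the pytest usage example in the module header; the descriptor file path is a placeholder:

```python
# End-to-end test workflow enabled by the reworked DescriptorLoader; the
# descriptor file path is a placeholder.
from common.tools.descriptor.Loader import (
    DescriptorLoader, check_descriptor_load_results, validate_empty_scenario)
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from service.client.ServiceClient import ServiceClient
from slice.client.SliceClient import SliceClient

context_client = ContextClient()
validate_empty_scenario(context_client)            # nothing loaded yet

descriptor_loader = DescriptorLoader(
    descriptors_file='path/to/descriptor.json', num_workers=10,
    context_client=context_client, device_client=DeviceClient(),
    service_client=ServiceClient(), slice_client=SliceClient())
results = descriptor_loader.process()              # entities loaded in parallel
check_descriptor_load_results(results, descriptor_loader)
descriptor_loader.validate()                       # Context counts match the file
# ... run the test ...
descriptor_loader.unload()                         # tear down in reverse order
validate_empty_scenario(context_client)
```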
diff --git a/src/common/tools/mutex_queues/MutexQueues.py b/src/common/tools/mutex_queues/MutexQueues.py
index b9fc567d561287ed5d92f51a3cab0f92d58d88ed..96e22a86f012cb8326c380a0ebbf0c1b40cae21c 100644
--- a/src/common/tools/mutex_queues/MutexQueues.py
+++ b/src/common/tools/mutex_queues/MutexQueues.py
@@ -35,7 +35,7 @@
 #             self.mutex_queues.signal_done(device_uuid)
 
 import threading
-from queue import Queue
+from queue import Queue, Empty
 from typing import Dict
 
 class MutexQueues:
@@ -67,8 +67,11 @@ class MutexQueues:
         with self.lock:
             queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
 
-            # remove muself from the queue
-            queue.get_nowait()
+            # remove myself from the queue
+            try:
+                queue.get(block=True, timeout=0.1)
+            except Empty:
+                pass
 
             # if there are no other tasks queued, return
             if queue.qsize() == 0: return
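Why the bounded `get()` matters: `get_nowait()` raises `queue.Empty` immediately if another thread has already drained the queue, which would crash `signal_done()` during concurrent teardown. The replacement tolerates that race; a minimal illustration (not TFS code):

```python
from queue import Queue, Empty

q = Queue()
try:
    # wait briefly for our own entry instead of failing fast
    q.get(block=True, timeout=0.1)
except Empty:
    pass  # nobody queued ahead of us; nothing to remove
```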
diff --git a/src/common/tools/object_factory/Constraint.py b/src/common/tools/object_factory/Constraint.py
index e3c5129fd5bda5fb4a6659fc39b208bbdf0bb40f..ef00e3872343196f0a9f8de97d3b1ab6fc12d847 100644
--- a/src/common/tools/object_factory/Constraint.py
+++ b/src/common/tools/object_factory/Constraint.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import json
-from typing import Any, Dict, Union
+from typing import Any, Dict, List, Union
 
 def json_constraint_custom(constraint_type : str, constraint_value : Union[str, Dict[str, Any]]) -> Dict:
     if not isinstance(constraint_value, str): constraint_value = json.dumps(constraint_value, sort_keys=True)
@@ -29,5 +29,16 @@ def json_constraint_endpoint_location_gps(endpoint_id : Dict, latitude : float,
 def json_constraint_endpoint_priority(endpoint_id : Dict, priority : int) -> Dict:
     return {'endpoint_priority': {'endpoint_id': endpoint_id, 'priority': priority}}
 
-def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool) -> Dict:
-    return {'sla_availability': {'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active}}
+def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool, availability : float) -> Dict:
+    return {'sla_availability': {
+        'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active, 'availability': availability
+    }}
+
+def json_constraint_sla_capacity(capacity_gbps : float) -> Dict:
+    return {'sla_capacity': {'capacity_gbps': capacity_gbps}}
+
+def json_constraint_sla_isolation(isolation_levels : List[int]) -> Dict:
+    return {'sla_isolation': {'isolation_level': isolation_levels}}
+
+def json_constraint_sla_latency(e2e_latency_ms : float) -> Dict:
+    return {'sla_latency': {'e2e_latency_ms': e2e_latency_ms}}
diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py
index 998c4c98f21648f6e186ad91bae90875dac84fab..9705e3187ffff633a4d127855c1c57afcf397e39 100644
--- a/src/compute/service/__main__.py
+++ b/src/compute/service/__main__.py
@@ -26,7 +26,7 @@ from .rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
 terminate = threading.Event()
 LOGGER = None
 
-def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name, unused-argument
     LOGGER.warning('Terminate signal received')
     terminate.set()
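With the extended factory, a request can carry typed SLA constraints instead of free-form custom ones. A usage sketch based on the signatures above (the concrete values are illustrative):

```python
from common.tools.object_factory.Constraint import (
    json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_isolation,
    json_constraint_sla_latency)

constraints = [
    json_constraint_sla_capacity(10.0),                # {'sla_capacity': {'capacity_gbps': 10.0}}
    json_constraint_sla_latency(15.2),                 # {'sla_latency': {'e2e_latency_ms': 15.2}}
    json_constraint_sla_availability(1, True, 99.99),  # 1 disjoint path, all active, 99.99% availability
    json_constraint_sla_isolation([0, 1]),             # isolation_level enum values
]
```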
diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
index 0c66254d93220392d44c8393373ba94ddd7b3f93..67ef3dfb0ba1519440b0a22f46935165c8388cb8 100644
--- a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
+++ b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
@@ -12,48 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from flask.json import jsonify
 from flask_restful import Resource
-from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId
-from common.proto.policy_pb2 import PolicyRuleId
-from common.tools.grpc.Tools import grpc_message_to_json
-from common.tools.object_factory.Connection import json_connection_id
-from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.PolicyRule import json_policyrule_id
-from common.tools.object_factory.Service import json_service_id
-from common.tools.object_factory.Slice import json_slice_id
-from common.tools.object_factory.Topology import json_topology_id
+from common.proto.context_pb2 import Empty
 from context.client.ContextClient import ContextClient
-
-
-def format_grpc_to_json(grpc_reply):
-    return jsonify(grpc_message_to_json(grpc_reply))
-
-def grpc_connection_id(connection_uuid):
-    return ConnectionId(**json_connection_id(connection_uuid))
-
-def grpc_context_id(context_uuid):
-    return ContextId(**json_context_id(context_uuid))
-
-def grpc_device_id(device_uuid):
-    return DeviceId(**json_device_id(device_uuid))
-
-def grpc_link_id(link_uuid):
-    return LinkId(**json_link_id(link_uuid))
-
-def grpc_service_id(context_uuid, service_uuid):
-    return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid)))
-
-def grpc_slice_id(context_uuid, slice_uuid):
-    return SliceId(**json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)))
-
-def grpc_topology_id(context_uuid, topology_uuid):
-    return TopologyId(**json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)))
-
-def grpc_policy_rule_id(policy_rule_uuid):
-    return PolicyRuleId(**json_policyrule_id(policy_rule_uuid))
+from .Tools import (
+    format_grpc_to_json, grpc_connection_id, grpc_context_id, grpc_device_id, grpc_link_id, grpc_policy_rule_id,
+    grpc_service_id, grpc_slice_id, grpc_topology_id)
 
 
 class _Resource(Resource):
diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/Tools.py b/src/compute/service/rest_server/nbi_plugins/debug_api/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3dff545ba9812ff3f4e13c3da53774af7626014
--- /dev/null
+++ b/src/compute/service/rest_server/nbi_plugins/debug_api/Tools.py
@@ -0,0 +1,54 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from flask.json import jsonify
+from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, LinkId, ServiceId, SliceId, TopologyId
+from common.proto.policy_pb2 import PolicyRuleId
+from common.tools.grpc.Tools import grpc_message_to_json
+from common.tools.object_factory.Connection import json_connection_id
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.PolicyRule import json_policyrule_id
+from common.tools.object_factory.Service import json_service_id
+from common.tools.object_factory.Slice import json_slice_id
+from common.tools.object_factory.Topology import json_topology_id
+
+
+def format_grpc_to_json(grpc_reply):
+    return jsonify(grpc_message_to_json(grpc_reply))
+
+def grpc_connection_id(connection_uuid):
+    return ConnectionId(**json_connection_id(connection_uuid))
+
+def grpc_context_id(context_uuid):
+    return ContextId(**json_context_id(context_uuid))
+
+def grpc_device_id(device_uuid):
+    return DeviceId(**json_device_id(device_uuid))
+
+def grpc_link_id(link_uuid):
+    return LinkId(**json_link_id(link_uuid))
+
+def grpc_service_id(context_uuid, service_uuid):
+    return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid)))
+
+def grpc_slice_id(context_uuid, slice_uuid):
+    return SliceId(**json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)))
+
+def grpc_topology_id(context_uuid, topology_uuid):
+    return TopologyId(**json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)))
+
+def grpc_policy_rule_id(policy_rule_uuid):
+    return PolicyRuleId(**json_policyrule_id(policy_rule_uuid))
diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py b/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py
index d9243cca711a2b2ad00509102ecab5b06c6cc334..d1309353c412a738e2f2238d0bb4fff07765b825 100644
--- a/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py
+++ b/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py
@@ -12,52 +12,48 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# RFC 8466 - L2VPN Service Model (L2SM)
-# Ref: https://datatracker.ietf.org/doc/html/rfc8466
-
 from compute.service.rest_server.RestServer import RestServer
 from .Resources import (
     Connection, ConnectionIds, Connections, Context, ContextIds, Contexts, Device, DeviceIds, Devices, Link, LinkIds,
     Links, PolicyRule, PolicyRuleIds, PolicyRules, Service, ServiceIds, Services, Slice, SliceIds, Slices, Topologies,
     Topology, TopologyIds)
 
-URL_PREFIX = '/api'
+URL_PREFIX = '/debug-api'
 
-# Use 'path' type in Service and Sink because service_uuid and link_uuid might contain char '/' and Flask is unable to
-# recognize them in 'string' type.
+# Use 'path' type since some identifiers might contain char '/' and Flask is unable to recognize them in 'string' type.
 RESOURCES = [
     # (endpoint_name, resource_class, resource_url)
     ('api.context_ids',    ContextIds,    '/context_ids'),
     ('api.contexts',       Contexts,      '/contexts'),
-    ('api.context',        Context,       '/context/<string:context_uuid>'),
+    ('api.context',        Context,       '/context/<path:context_uuid>'),
 
-    ('api.topology_ids',   TopologyIds,   '/context/<string:context_uuid>/topology_ids'),
-    ('api.topologies',     Topologies,    '/context/<string:context_uuid>/topologies'),
-    ('api.topology',       Topology,      '/context/<string:context_uuid>/topology/<string:topology_uuid>'),
+    ('api.topology_ids',   TopologyIds,   '/context/<path:context_uuid>/topology_ids'),
+    ('api.topologies',     Topologies,    '/context/<path:context_uuid>/topologies'),
+    ('api.topology',       Topology,      '/context/<path:context_uuid>/topology/<path:topology_uuid>'),
 
-    ('api.service_ids',    ServiceIds,    '/context/<string:context_uuid>/service_ids'),
-    ('api.services',       Services,      '/context/<string:context_uuid>/services'),
-    ('api.service',        Service,       '/context/<string:context_uuid>/service/<path:service_uuid>'),
+    ('api.service_ids',    ServiceIds,    '/context/<path:context_uuid>/service_ids'),
+    ('api.services',       Services,      '/context/<path:context_uuid>/services'),
+    ('api.service',        Service,       '/context/<path:context_uuid>/service/<path:service_uuid>'),
 
-    ('api.slice_ids',      SliceIds,      '/context/<string:context_uuid>/slice_ids'),
-    ('api.slices',         Slices,        '/context/<string:context_uuid>/slices'),
-    ('api.slice',          Slice,         '/context/<string:context_uuid>/slice/<path:slice_uuid>'),
+    ('api.slice_ids',      SliceIds,      '/context/<path:context_uuid>/slice_ids'),
+    ('api.slices',         Slices,        '/context/<path:context_uuid>/slices'),
+    ('api.slice',          Slice,         '/context/<path:context_uuid>/slice/<path:slice_uuid>'),
 
     ('api.device_ids',     DeviceIds,     '/device_ids'),
     ('api.devices',        Devices,       '/devices'),
-    ('api.device',         Device,        '/device/<string:device_uuid>'),
+    ('api.device',         Device,        '/device/<path:device_uuid>'),
 
     ('api.link_ids',       LinkIds,       '/link_ids'),
     ('api.links',          Links,         '/links'),
     ('api.link',           Link,          '/link/<path:link_uuid>'),
 
-    ('api.connection_ids', ConnectionIds, '/context/<string:context_uuid>/service/<path:service_uuid>/connection_ids'),
-    ('api.connections',    Connections,   '/context/<string:context_uuid>/service/<path:service_uuid>/connections'),
+    ('api.connection_ids', ConnectionIds, '/context/<path:context_uuid>/service/<path:service_uuid>/connection_ids'),
+    ('api.connections',    Connections,   '/context/<path:context_uuid>/service/<path:service_uuid>/connections'),
     ('api.connection',     Connection,    '/connection/<path:connection_uuid>'),
 
     ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'),
     ('api.policyrules',    PolicyRules,   '/policyrules'),
-    ('api.policyrule',     PolicyRule,    '/policyrule/<string:policyrule_uuid>'),
+    ('api.policyrule',     PolicyRule,    '/policyrule/<path:policyrule_uuid>'),
 ]
 
 def register_debug_api(rest_server : RestServer):
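The switch to `path` converters is not cosmetic: Flask's default `string` converter stops matching at `/`, so an identifier containing a slash would never reach the handler. A minimal illustration with a hypothetical route (not TFS code):

```python
from flask import Flask

app = Flask(__name__)

@app.route('/context/<path:context_uuid>/topology/<path:topology_uuid>')
def get_topology(context_uuid, topology_uuid):
    # with 'string' instead of 'path', a uuid like 'admin/v2' would 404
    return {'context_uuid': context_uuid, 'topology_uuid': topology_uuid}
```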
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 1a8936ed4025586bf4de280b64cf2008b14c1a50..b89fa2207d1cd69e30612e8cecc8aa0f325e9dd3 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -113,7 +113,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
             location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id)
         num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()}
         num_disjoint_paths = min(num_endpoints_per_location)
-        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active)
+        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active, 0.0)
     return target
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py
index 1b9027b1feb7c65c6fb3ee6ecdef485e4719a1b5..110c51af5fe0e4cd8e012fd4105712ed176dd12a 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/__init__.py
@@ -21,7 +21,7 @@ from .L2VPN_Services import L2VPN_Services
 from .L2VPN_Service import L2VPN_Service
 from .L2VPN_SiteNetworkAccesses import L2VPN_SiteNetworkAccesses
 
-URL_PREFIX = '/ietf-l2vpn-svc:l2vpn-svc'
+URL_PREFIX = '/data/ietf-l2vpn-svc:l2vpn-svc'
 
 def _add_resource(rest_server : RestServer, resource : Resource, *urls, **kwargs):
     urls = [(URL_PREFIX + url) for url in urls]
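After this change, IETF L2VPN requests are rooted under the RESTCONF-style `/data` subtree. A hypothetical client call; the base URL, resource path, and credentials below are assumptions for illustration only, not taken from the TFS documentation:

```python
import requests

NBI_URL = 'http://127.0.0.1:8080'  # hypothetical address of the compute REST server
url = NBI_URL + '/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services'  # container name per RFC 8466
response = requests.get(url, auth=('admin', 'admin'))          # credentials illustrative
print(response.status_code)
```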
diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py
index dd60441ca70329b9431188e28c21d98d941ada14..09723cc6f6b31e2496bf5ab475f50d0aa58f95c2 100644
--- a/src/context/service/database/ConfigRule.py
+++ b/src/context/service/database/ConfigRule.py
@@ -80,7 +80,7 @@ def compose_config_rules_data(
     return dict_config_rules
 
 def upsert_config_rules(
-    session : Session, config_rules : List[Dict],
+    session : Session, config_rules : List[Dict], is_delete : bool = False,
     device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None,
 ) -> bool:
     uuids_to_delete : Set[str] = set()
@@ -89,7 +89,9 @@ def upsert_config_rules(
     for config_rule in config_rules:
         configrule_uuid = config_rule['configrule_uuid']
         configrule_action = config_rule['action']
-        if configrule_action == ORM_ConfigActionEnum.SET:
+        if is_delete or configrule_action == ORM_ConfigActionEnum.DELETE:
+            uuids_to_delete.add(configrule_uuid)
+        elif configrule_action == ORM_ConfigActionEnum.SET:
             position = uuids_to_upsert.get(configrule_uuid)
             if position is None:
                 # if not added, add it
@@ -98,8 +100,6 @@ def upsert_config_rules(
             else:
                 # if already added, update occurrence
                 rules_to_upsert[position] = config_rule
-        elif configrule_action == ORM_ConfigActionEnum.DELETE:
-            uuids_to_delete.add(configrule_uuid)
         else:
             MSG = 'Action for ConfigRule({:s}) is not supported '+\
                 '(device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})'
diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py
index 0540841c3a570f9a1e28ec530998b115f73a62a7..3a73f6589f9332aa4c84f8f296f2cb56db3048bf 100644
--- a/src/context/service/database/Constraint.py
+++ b/src/context/service/database/Constraint.py
@@ -66,7 +66,7 @@ def compose_constraints_data(
             constraint_name = '{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid)
         elif kind in {
             ConstraintKindEnum.SCHEDULE, ConstraintKindEnum.SLA_CAPACITY, ConstraintKindEnum.SLA_LATENCY,
-            ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION_LEVEL
+            ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION
         }:
             constraint_name = '{:s}:{:s}:'.format(parent_kind, kind.value)
         else:
@@ -81,7 +81,7 @@ def compose_constraints_data(
     return dict_constraints
 
 def upsert_constraints(
-    session : Session, constraints : List[Dict],
+    session : Session, constraints : List[Dict], is_delete : bool = False,
     service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
 ) -> bool:
     uuids_to_upsert : Dict[str, int] = dict()
@@ -107,11 +107,11 @@ def upsert_constraints(
         #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True})
         #LOGGER.warning('delete stmt={:s}'.format(str(str_stmt)))
         constraint_deletes = session.execute(stmt)
-        LOGGER.warning('constraint_deletes.rowcount={:s}'.format(str(constraint_deletes.rowcount)))
+        #LOGGER.warning('constraint_deletes.rowcount={:s}'.format(str(constraint_deletes.rowcount)))
         delete_affected = int(constraint_deletes.rowcount) > 0
 
     upsert_affected = False
-    if len(constraints) > 0:
+    if not is_delete and len(constraints) > 0:
         stmt = insert(ConstraintModel).values(constraints)
         stmt = stmt.on_conflict_do_update(
             index_elements=[ConstraintModel.constraint_uuid],
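The reordered `if`/`elif` implements a simple partition: when `is_delete` is set, every rule UUID goes to the delete set regardless of its action; otherwise only `DELETE`-actioned rules do, and a repeated `SET` rule overwrites its earlier occurrence. A pure-Python sketch of that decision table (illustrative types, not the TFS code):

```python
from enum import Enum

class Action(Enum):
    SET = 'set'
    DELETE = 'delete'

def partition(rules, is_delete=False):
    to_delete, to_upsert = set(), {}
    for uuid, action in rules:
        if is_delete or action == Action.DELETE:
            to_delete.add(uuid)
        elif action == Action.SET:
            to_upsert[uuid] = action  # a later occurrence replaces the earlier one
    return to_delete, to_upsert

rules = [('r1', Action.SET), ('r2', Action.DELETE), ('r1', Action.SET)]
print(partition(rules))                  # ({'r2'}, {'r1': <Action.SET: 'set'>})
print(partition(rules, is_delete=True))  # ({'r1', 'r2'}, {})
```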
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
index a100103890f293d418b4c70a7948ad9687ffe5b3..e95cec4ae533795b23b8fd4e2f26ac9000c1bcce 100644
--- a/src/context/service/database/PolicyRule.py
+++ b/src/context/service/database/PolicyRule.py
@@ -65,7 +65,7 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
     policyrule_kind = PolicyRuleKindEnum._member_map_.get(policyrule_kind.upper()) # pylint: disable=no-member
 
     policyrule_state = grpc_to_enum__policyrule_state(policyrule_basic.policyRuleState.policyRuleState)
-    policyrule_state_message = policyrule_basic.policyRuleState.policyRuleStateMessage
+    policyrule_state_msg = policyrule_basic.policyRuleState.policyRuleStateMessage
 
     json_policyrule_basic = grpc_message_to_json(policyrule_basic)
     policyrule_eca_data = json.dumps({
@@ -77,15 +77,15 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
     now = datetime.datetime.utcnow()
 
     policyrule_data = [{
-        'policyrule_uuid'         : policyrule_uuid,
-        'policyrule_kind'         : policyrule_kind,
-        'policyrule_state'        : policyrule_state,
-        'policyrule_state_message': policyrule_state_message,
-        'policyrule_priority'     : policyrule_basic.priority,
-        'policyrule_eca_data'     : policyrule_eca_data,
-        'created_at'              : now,
-        'updated_at'              : now,
-    }]
+        'policyrule_uuid'     : policyrule_uuid,
+        'policyrule_kind'     : policyrule_kind,
+        'policyrule_state'    : policyrule_state,
+        'policyrule_state_msg': policyrule_state_msg,
+        'policyrule_priority' : policyrule_basic.priority,
+        'policyrule_eca_data' : policyrule_eca_data,
+        'created_at'          : now,
+        'updated_at'          : now,
+    }]
 
     policyrule_service_uuid = None
     if policyrule_kind == PolicyRuleKindEnum.SERVICE:
@@ -108,11 +108,11 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
         stmt = stmt.on_conflict_do_update(
             index_elements=[PolicyRuleModel.policyrule_uuid],
             set_=dict(
-                policyrule_state         = stmt.excluded.policyrule_state,
-                policyrule_state_message = stmt.excluded.policyrule_state_message,
-                policyrule_priority      = stmt.excluded.policyrule_priority,
-                policyrule_eca_data      = stmt.excluded.policyrule_eca_data,
-                updated_at               = stmt.excluded.updated_at,
+                policyrule_state     = stmt.excluded.policyrule_state,
+                policyrule_state_msg = stmt.excluded.policyrule_state_msg,
+                policyrule_priority  = stmt.excluded.policyrule_priority,
+                policyrule_eca_data  = stmt.excluded.policyrule_eca_data,
+                updated_at           = stmt.excluded.updated_at,
             )
         )
         stmt = stmt.returning(PolicyRuleModel.created_at, PolicyRuleModel.updated_at)
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
index 4b63a4ae56fa278679d145f1da2c62e767f73005..a81a80c3c2398fed16842bcc3d8aa16342edb72b 100644
--- a/src/context/service/database/Service.py
+++ b/src/context/service/database/Service.py
@@ -77,8 +77,11 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
     service_endpoints_data : List[Dict] = list()
     for i,endpoint_id in enumerate(request.service_endpoint_ids):
         endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
-        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+        if len(endpoint_context_uuid) == 0:
+            endpoint_context_uuid = context_get_uuid(request.service_id.context_id, allow_random=False)
+        else:
+            endpoint_context_uuid = context_get_uuid(endpoint_id.topology_id.context_id, allow_random=False)
+        if endpoint_context_uuid != context_uuid:
             raise InvalidArgumentException(
                 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
                 endpoint_context_uuid,
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
index 7c291e33d858841054adc59306fbedb2e9a18f79..1d6781d53f7c85d8cb878b1b38b0de65b4ef5726 100644
--- a/src/context/service/database/Slice.py
+++ b/src/context/service/database/Slice.py
@@ -77,8 +77,11 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
     slice_endpoints_data : List[Dict] = list()
     for i,endpoint_id in enumerate(request.slice_endpoint_ids):
         endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
-        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+        if len(endpoint_context_uuid) == 0:
+            endpoint_context_uuid = context_get_uuid(request.slice_id.context_id, allow_random=False)
+        else:
+            endpoint_context_uuid = context_get_uuid(endpoint_id.topology_id.context_id, allow_random=False)
+        if endpoint_context_uuid != context_uuid:
             raise InvalidArgumentException(
                 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
                 endpoint_context_uuid,
@@ -175,10 +178,6 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
     slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name
     context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=False)
 
-    if len(request.slice_constraints) > 0: raise NotImplementedError('UnsetSlice: removal of constraints')
-    if len(request.slice_config.config_rules) > 0: raise NotImplementedError('UnsetSlice: removal of config rules')
-    if len(request.slice_endpoint_ids) > 0: raise NotImplementedError('UnsetSlice: removal of endpoints')
-
     slice_endpoint_uuids : Set[str] = set()
     for i,endpoint_id in enumerate(request.slice_endpoint_ids):
         endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
@@ -200,6 +199,10 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
         for subslice_id in request.slice_subslice_ids
     }
 
+    now = datetime.datetime.utcnow()
+    constraints = compose_constraints_data(request.slice_constraints, now, slice_uuid=slice_uuid)
+    config_rules = compose_config_rules_data(request.slice_config.config_rules, now, slice_uuid=slice_uuid)
+
     def callback(session : Session) -> bool:
         num_deletes = 0
         if len(slice_service_uuids) > 0:
@@ -210,17 +213,21 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
                 )).delete()
         if len(slice_subslice_uuids) > 0:
             num_deletes += session.query(SliceSubSliceModel)\
-                .filter_by(and_(
+                .filter(and_(
                     SliceSubSliceModel.slice_uuid == slice_uuid,
                     SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids)
                 )).delete()
         if len(slice_endpoint_uuids) > 0:
             num_deletes += session.query(SliceEndPointModel)\
-                .filter_by(and_(
+                .filter(and_(
                     SliceEndPointModel.slice_uuid == slice_uuid,
                     SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids)
                 )).delete()
-        return num_deletes > 0
+
+        changed_constraints = upsert_constraints(session, constraints, is_delete=True, slice_uuid=slice_uuid)
+        changed_config_rules = upsert_config_rules(session, config_rules, is_delete=True, slice_uuid=slice_uuid)
+
+        return num_deletes > 0 or changed_constraints or changed_config_rules
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated
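The `filter_by` → `filter` fix deserves a note: `Query.filter_by()` only accepts keyword arguments (`column=value`), so passing an `and_(...)` expression to it raises a `TypeError` at runtime, while `Query.filter()` is the expression-based variant. A self-contained demonstration against an in-memory SQLite table (illustrative model, not TFS code):

```python
from sqlalchemy import Column, String, and_, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Pair(Base):
    __tablename__ = 'pair'
    a = Column(String, primary_key=True)
    b = Column(String, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Pair(a='x', b='1'), Pair(a='x', b='2'), Pair(a='y', b='1')])
    # expression-based filtering; filter_by(and_(...)) would raise TypeError
    deleted = session.query(Pair)\
        .filter(and_(Pair.a == 'x', Pair.b.in_(['1', '2'])))\
        .delete(synchronize_session=False)
    session.commit()
    print(deleted)  # 2
```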
diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py
index 363611105135661ccf3bd001c2e65ab75f9b6a6c..d7bb97cd0fec1037e98c8713b885b2d5141cae63 100644
--- a/src/context/service/database/models/ConfigRuleModel.py
+++ b/src/context/service/database/models/ConfigRuleModel.py
@@ -28,9 +28,9 @@ class ConfigRuleModel(_Base):
     __tablename__ = 'configrule'
 
     configrule_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True, index=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConfigRuleKindEnum), nullable=False)
     action          = Column(Enum(ORM_ConfigActionEnum), nullable=False)
diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py
index c2b20de202cbeb065ffd50683d015729c76af9bc..156e33c6bb32e237af241035f1d9672b0b419222 100644
--- a/src/context/service/database/models/ConnectionModel.py
+++ b/src/context/service/database/models/ConnectionModel.py
@@ -25,7 +25,7 @@ class ConnectionModel(_Base):
     __tablename__ = 'connection'
 
     connection_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False, index=True)
     settings        = Column(String, nullable=False)
     created_at      = Column(DateTime, nullable=False)
     updated_at      = Column(DateTime, nullable=False)
@@ -56,7 +56,7 @@ class ConnectionEndPointModel(_Base):
     __tablename__ = 'connection_endpoint'
 
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position        = Column(Integer, nullable=False)
 
     connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined')
@@ -70,7 +70,7 @@ class ConnectionSubServiceModel(_Base):
     __tablename__ = 'connection_subservice'
 
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
-    subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True)
+    subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined')
     subservice = relationship('ServiceModel', lazy='joined') # back_populates='connection_subservices'
diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py
index 01c7bcb76b00ac5d8b49d9f99f010d1ddfd30788..2412080c1a2883e7bed85e6e22f389270b3f73bc 100644
--- a/src/context/service/database/models/ConstraintModel.py
+++ b/src/context/service/database/models/ConstraintModel.py
@@ -19,22 +19,24 @@ from typing import Dict
 from ._Base import _Base
 
 # Enum values should match name of field in Constraint message
+# - enum item name should be Constraint message type in upper case
+# - enum item value should be Constraint message type as it is in the proto files
 class ConstraintKindEnum(enum.Enum):
-    CUSTOM              = 'custom'
-    SCHEDULE            = 'schedule'
-    ENDPOINT_LOCATION   = 'endpoint_location'
-    ENDPOINT_PRIORITY   = 'endpoint_priority'
-    SLA_CAPACITY        = 'sla_capacity'
-    SLA_LATENCY         = 'sla_latency'
-    SLA_AVAILABILITY    = 'sla_availability'
-    SLA_ISOLATION_LEVEL = 'sla_isolation'
+    CUSTOM            = 'custom'
+    SCHEDULE          = 'schedule'
+    ENDPOINT_LOCATION = 'endpoint_location'
+    ENDPOINT_PRIORITY = 'endpoint_priority'
+    SLA_CAPACITY      = 'sla_capacity'
+    SLA_LATENCY       = 'sla_latency'
+    SLA_AVAILABILITY  = 'sla_availability'
+    SLA_ISOLATION     = 'sla_isolation'
 
 class ConstraintModel(_Base):
     __tablename__ = 'constraint'
 
     constraint_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConstraintKindEnum), nullable=False)
     data            = Column(String, nullable=False)
diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py
index e591bc718711c6e0b8219eb60ce68c42f35a800c..12ba7e10e7c3d5789f9bf16ad7b4f50c35a36bf5 100644
--- a/src/context/service/database/models/EndPointModel.py
+++ b/src/context/service/database/models/EndPointModel.py
@@ -23,8 +23,8 @@ class EndPointModel(_Base):
     __tablename__ = 'endpoint'
 
     endpoint_uuid    = Column(UUID(as_uuid=False), primary_key=True)
-    device_uuid      = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), nullable=False)
-    topology_uuid    = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False)
+    device_uuid      = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), nullable=False, index=True)
+    topology_uuid    = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False, index=True)
     name             = Column(String, nullable=False)
     endpoint_type    = Column(String, nullable=False)
     kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1))
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
index 49c62d376624dc02b51a2b56860b04c322d66934..ee591f5c8404cd7f0f6c97651b5f731a51c43303 100644
--- a/src/context/service/database/models/LinkModel.py
+++ b/src/context/service/database/models/LinkModel.py
@@ -46,7 +46,7 @@ class LinkEndPointModel(_Base):
     __tablename__ = 'link_endpoint'
 
     link_uuid     = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     link     = relationship('LinkModel', back_populates='link_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints'
diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py
index 4059991e1f1af7851d9fced17739b92675261227..2f0c8a326a57a05ab1fd623a968dea0bc39d9e76 100644
--- a/src/context/service/database/models/PolicyRuleModel.py
+++ b/src/context/service/database/models/PolicyRuleModel.py
@@ -28,15 +28,15 @@ class PolicyRuleKindEnum(enum.Enum):
 class PolicyRuleModel(_Base):
     __tablename__ = 'policyrule'
 
-    policyrule_uuid          = Column(UUID(as_uuid=False), primary_key=True)
-    policyrule_kind          = Column(Enum(PolicyRuleKindEnum), nullable=False)
-    policyrule_state         = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False)
-    policyrule_state_message = Column(String, nullable=False)
-    policyrule_priority      = Column(Integer, nullable=False)
-    policyrule_service_uuid  = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True)
-    policyrule_eca_data      = Column(String, nullable=False)
-    created_at               = Column(DateTime, nullable=False)
-    updated_at               = Column(DateTime, nullable=False)
+    policyrule_uuid         = Column(UUID(as_uuid=False), primary_key=True)
+    policyrule_kind         = Column(Enum(PolicyRuleKindEnum), nullable=False)
+    policyrule_state        = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False)
+    policyrule_state_msg    = Column(String, nullable=False)
+    policyrule_priority     = Column(Integer, nullable=False)
+    policyrule_service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True, index=True)
+    policyrule_eca_data     = Column(String, nullable=False)
+    created_at              = Column(DateTime, nullable=False)
+    updated_at              = Column(DateTime, nullable=False)
 
     policyrule_service = relationship('ServiceModel') # back_populates='policyrules'
     policyrule_devices = relationship('PolicyRuleDeviceModel' ) # back_populates='policyrule'
@@ -55,7 +55,7 @@ class PolicyRuleModel(_Base):
             'policyRuleId': self.dump_id(),
             'policyRuleState': {
                 'policyRuleState': self.policyrule_state.value,
-                'policyRuleStateMessage': self.policyrule_state_message,
+                'policyRuleStateMessage': self.policyrule_state_msg,
             },
             'priority': self.policyrule_priority,
         })
@@ -71,7 +71,7 @@ class PolicyRuleDeviceModel(_Base):
     __tablename__ = 'policyrule_device'
 
     policyrule_uuid = Column(ForeignKey('policyrule.policyrule_uuid', ondelete='RESTRICT'), primary_key=True)
-    device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices'
     device     = relationship('DeviceModel', lazy='joined') # back_populates='policyrule_devices'
diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py
index b581bf900a8861d9af199fef62bd218159b1e00e..09ff381b5eb374ea752590bba5403fe816319036 100644
--- a/src/context/service/database/models/ServiceModel.py
+++ b/src/context/service/database/models/ServiceModel.py
@@ -25,7 +25,7 @@ class ServiceModel(_Base):
     __tablename__ = 'service'
 
    service_uuid   = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid   = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid   = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     service_name   = Column(String, nullable=False)
     service_type   = Column(Enum(ORM_ServiceTypeEnum), nullable=False)
     service_status = Column(Enum(ORM_ServiceStatusEnum), nullable=False)
@@ -67,7 +67,7 @@ class ServiceEndPointModel(_Base):
     __tablename__ = 'service_endpoint'
 
     service_uuid  = Column(ForeignKey('service.service_uuid', ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     service  = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints'
diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py
index 1a562bcd973cc41d777fdd20e4d42622afeebc44..2d6c884169154fee8d44c26464416c6708c650b1 100644
--- a/src/context/service/database/models/SliceModel.py
+++ b/src/context/service/database/models/SliceModel.py
@@ -24,7 +24,7 @@ class SliceModel(_Base):
     __tablename__ = 'slice'
 
     slice_uuid         = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid       = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid       = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     slice_name         = Column(String, nullable=True)
     slice_status       = Column(Enum(ORM_SliceStatusEnum), nullable=False)
     slice_owner_uuid   = Column(String, nullable=True)
@@ -81,7 +81,7 @@ class SliceEndPointModel(_Base):
     __tablename__ = 'slice_endpoint'
 
     slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     slice    = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints'
@@ -90,7 +90,7 @@ class SliceServiceModel(_Base):
     __tablename__ = 'slice_service'
 
     slice_uuid   = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
-    service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True)
+    service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     slice   = relationship('SliceModel', back_populates='slice_services', lazy='joined')
     service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services'
@@ -98,5 +98,9 @@ class SliceServiceModel(_Base):
 class SliceSubSliceModel(_Base):
     __tablename__ = 'slice_subslice'
 
-    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
-    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True)
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
+    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
+
+    slice    = relationship(
+        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
index 92802e5b2ddb4ed57342bbd244255b73b11c6cce..7dc2333f0a9b979f251c173d850a235dcb822d91 100644
--- a/src/context/service/database/models/TopologyModel.py
+++ b/src/context/service/database/models/TopologyModel.py
@@ -22,7 +22,7 @@ class TopologyModel(_Base):
     __tablename__ = 'topology'
 
     topology_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid  = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid  = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     topology_name = Column(String, nullable=False)
     created_at    = Column(DateTime, nullable=False)
     updated_at    = Column(DateTime, nullable=False)
@@ -56,8 +56,8 @@ class TopologyModel(_Base):
 class TopologyDeviceModel(_Base):
     __tablename__ = 'topology_device'
 
-    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
-    device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True)
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True, index=True)
 
     #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices'
     device   = relationship('DeviceModel', lazy='joined') # back_populates='topology_devices'
@@ -65,8 +65,8 @@ class TopologyDeviceModel(_Base):
 class TopologyLinkModel(_Base):
     __tablename__ = 'topology_link'
 
-    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
-    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True, index=True)
 
     #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links'
     link     = relationship('LinkModel', lazy='joined') # back_populates='topology_links'
diff --git a/src/context/service/database/models/_Base.py b/src/context/service/database/models/_Base.py
index 4323fb7130462b13958627216c62f1fe4edc91c7..a10de60eb8731132ec815de1ff897c06ac12b665 100644
--- a/src/context/service/database/models/_Base.py
+++ b/src/context/service/database/models/_Base.py
@@ -13,10 +13,60 @@
 # limitations under the License.
 
 import sqlalchemy
-from sqlalchemy.orm import declarative_base
+from typing import Any, List
+from sqlalchemy.orm import Session, sessionmaker, declarative_base
+from sqlalchemy.sql import text
+from sqlalchemy_cockroachdb import run_transaction
 
 _Base = declarative_base()
 
+def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
+    def index_storing(
+        index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str]
+    ) -> Any:
+        str_index_fields = ','.join(['"{:s}"'.format(index_field) for index_field in index_fields])
+        str_storing_fields = ','.join(['"{:s}"'.format(storing_field) for storing_field in storing_fields])
+        INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'
+        return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields))
+
+    statements = [
+        index_storing('configrule_device_uuid_rec_idx', 'configrule', ['device_uuid'], [
+            'service_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('configrule_service_uuid_rec_idx', 'configrule', ['service_uuid'], [
+            'device_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('configrule_slice_uuid_rec_idx', 'configrule', ['slice_uuid'], [
+            'device_uuid', 'service_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('connection_service_uuid_rec_idx', 'connection', ['service_uuid'], [
+            'settings', 'created_at', 'updated_at'
+        ]),
+        index_storing('constraint_service_uuid_rec_idx', 'constraint', ['service_uuid'], [
+            'slice_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('constraint_slice_uuid_rec_idx', 'constraint', ['slice_uuid'], [
+            'service_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('endpoint_device_uuid_rec_idx', 'endpoint', ['device_uuid'], [
+            'topology_uuid', 'name', 'endpoint_type', 'kpi_sample_types', 'created_at', 'updated_at'
+        ]),
+        index_storing('service_context_uuid_rec_idx', 'service', ['context_uuid'], [
+            'service_name', 'service_type', 'service_status', 'created_at', 'updated_at'
+        ]),
+        index_storing('slice_context_uuid_rec_idx', 'slice', ['context_uuid'], [
+            'slice_name', 'slice_status', 'slice_owner_uuid', 'slice_owner_string', 'created_at', 'updated_at'
+        ]),
+        index_storing('topology_context_uuid_rec_idx', 'topology', ['context_uuid'], [
+            'topology_name', 'created_at', 'updated_at'
+        ]),
+    ]
+    def callback(session : Session) -> bool:
+        for stmt in statements: session.execute(stmt)
+    run_transaction(sessionmaker(bind=db_engine), callback)
+
 def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False):
     if drop_if_exists: _Base.metadata.drop_all(db_engine)
     _Base.metadata.create_all(db_engine)
+    create_performance_enhancers(db_engine)
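For reference, the first statement generated above renders as follows. CockroachDB's `STORING` clause makes the secondary index covering, so lookups by the foreign key can be served from the index alone, without a join back to the primary index:

```python
INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'
print(INDEX_STORING.format(
    'configrule_device_uuid_rec_idx', 'configrule', '"device_uuid"', ','.join([
        '"{:s}"'.format(field) for field in [
            'service_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
        ]
    ])
))
# CREATE INDEX IF NOT EXISTS configrule_device_uuid_rec_idx ON "configrule" ("device_uuid")
#     STORING ("service_uuid","slice_uuid","position","kind","action","data","created_at","updated_at");
```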
diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py
index 8634c1f309e0c060654a168a1ad400f1d4722a32..6b52ef4c0f3583de628706ba79efffb9d5709820 100644
--- a/src/context/tests/Objects.py
+++ b/src/context/tests/Objects.py
@@ -17,7 +17,7 @@ from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.tools.object_factory.ConfigRule import json_config_rule_set
 from common.tools.object_factory.Connection import json_connection, json_connection_id
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_custom, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import json_device_id, json_device_packetrouter_disabled
 from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
@@ -95,7 +95,7 @@ def compose_service(
         for device_id, endpoint_name in endpoint_ids
     ]
     constraints = [
-        json_constraint_custom('latency[ms]', str(latency_ms)),
+        json_constraint_sla_latency(latency_ms),
         json_constraint_custom('jitter[us]', str(jitter_us)),
     ]
     config_rules = [
@@ -128,7 +128,7 @@ def compose_slice(
         for device_id, endpoint_name in endpoint_ids
     ]
     constraints = [
-        json_constraint_custom('latency[ms]', str(latency_ms)),
+        json_constraint_sla_latency(latency_ms),
         json_constraint_custom('jitter[us]', str(jitter_us)),
     ]
     config_rules = [
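The effect of the swap in the test objects, shown as the JSON each factory emits. The custom form keeps the value as an opaque string, while the typed form is machine-interpretable (the `'custom'` output shape below assumes the usual structure of `json_constraint_custom`, whose return statement is not shown in this patch):

```python
from common.tools.object_factory.Constraint import json_constraint_custom, json_constraint_sla_latency

print(json_constraint_custom('latency[ms]', '15.2'))
# {'custom': {'constraint_type': 'latency[ms]', 'constraint_value': '15.2'}}
print(json_constraint_sla_latency(15.2))
# {'sla_latency': {'e2e_latency_ms': 15.2}}
```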
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 1987be15ddef49f1756909ce9203d1aaa574d6f0..be40e64ecd25a5c46c23d5ec0a73a2484b65691d 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -22,13 +22,14 @@ from common.proto.device_pb2_grpc import DeviceServiceServicer
 from common.tools.context_queries.Device import get_device
 from common.tools.mutex_queues.MutexQueues import MutexQueues
 from context.client.ContextClient import ContextClient
-from device.service.Errors import ERROR_MISSING_DRIVER, ERROR_MISSING_KPI
 from .driver_api._Driver import _Driver
 from .driver_api.DriverInstanceCache import DriverInstanceCache, get_driver
 from .monitoring.MonitoringLoops import MonitoringLoops
+from .ErrorMessages import ERROR_MISSING_DRIVER, ERROR_MISSING_KPI
 from .Tools import (
     check_connect_rules, check_no_endpoints, compute_rules_to_add_delete, configure_rules, deconfigure_rules,
-    populate_config_rules, populate_endpoint_monitoring_resources, populate_endpoints, populate_initial_config_rules, subscribe_kpi, unsubscribe_kpi, update_endpoints)
+    populate_config_rules, populate_endpoint_monitoring_resources, populate_endpoints, populate_initial_config_rules,
+    subscribe_kpi, unsubscribe_kpi, update_endpoints)
 
 LOGGER = logging.getLogger(__name__)
 
@@ -108,9 +109,9 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
             if device is None:
                 raise NotFoundException('Device', device_uuid, extra_details='loading in ConfigureDevice')
 
-            driver : _Driver = self.driver_instance_cache.get(device_uuid)
+            driver : _Driver = get_driver(self.driver_instance_cache, device)
             if driver is None:
-                msg = ERROR_MISSING_DRIVER.format(str(device_uuid))
+                msg = ERROR_MISSING_DRIVER.format(device_uuid=str(device_uuid))
                 raise OperationFailedException('ConfigureDevice', extra_details=msg)
 
             if DeviceDriverEnum.DEVICEDRIVER_P4 in device.device_drivers:
@@ -149,6 +150,11 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
             context_client = ContextClient()
+            device = get_device(context_client, device_uuid, rw_copy=False)
+            if device is None:
+                raise NotFoundException('Device', device_uuid, extra_details='loading in DeleteDevice')
+            device_uuid = device.device_id.device_uuid.uuid
+
             self.monitoring_loops.remove_device(device_uuid)
             self.driver_instance_cache.delete(device_uuid)
             context_client.RemoveDevice(request)
@@ -162,9 +168,14 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            driver : _Driver = self.driver_instance_cache.get(device_uuid)
+            context_client = ContextClient()
+            device = get_device(context_client, device_uuid, rw_copy=False)
+            if device is None:
+                raise NotFoundException('Device', device_uuid, extra_details='loading in GetInitialConfig')
+
+            driver : _Driver = get_driver(self.driver_instance_cache, device)
             if driver is None:
-                msg = ERROR_MISSING_DRIVER.format(str(device_uuid))
+                msg = ERROR_MISSING_DRIVER.format(device_uuid=str(device_uuid))
                 raise OperationFailedException('GetInitialConfig', extra_details=msg)
 
             device_config = DeviceConfig()
@@ -190,15 +201,20 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
         kpi_uuid = request.kpi_id.kpi_id.uuid
         kpi_details = self.monitoring_loops.get_kpi_by_uuid(kpi_uuid)
         if kpi_details is None:
-            msg = ERROR_MISSING_KPI.format(str(kpi_uuid))
+            msg = ERROR_MISSING_KPI.format(kpi_uuid=str(kpi_uuid))
             raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
 
         device_uuid = kpi_details[0]
 
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            driver : _Driver = self.driver_instance_cache.get(device_uuid)
+            context_client = ContextClient()
+            device = get_device(context_client, device_uuid, rw_copy=False)
+            if device is None:
+                raise NotFoundException('Device', device_uuid, extra_details='loading in MonitorDeviceKpi')
+
+            driver : _Driver = get_driver(self.driver_instance_cache, device)
             if driver is None:
-                msg = ERROR_MISSING_DRIVER.format(str(device_uuid))
+                msg = ERROR_MISSING_DRIVER.format(device_uuid=str(device_uuid))
                 raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
 
             errors = manage_kpi_method(request, driver, self.monitoring_loops)
diff --git a/src/device/service/ErrorMessages.py b/src/device/service/ErrorMessages.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fbea721fdc52bdf759581c0525b30b1206ae844
--- /dev/null
+++ b/src/device/service/ErrorMessages.py
@@ -0,0 +1,39 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+_DEVICE_ID          = 'DeviceId({device_uuid:s})'
+_ENDPOINT_ID        = 'EndpointId({endpoint_uuid:s})'
+_ENDPOINT_DATA      = 'EndpointId({endpoint_data:s})'
+_KPI                = 'Kpi({kpi_uuid:s})'
+_DEVICE_ENDPOINT_ID = _DEVICE_ID + '/' + _ENDPOINT_ID
+_RESOURCE_KEY       = 'Resource(key={resource_key:s})'
+_RESOURCE_KEY_VALUE = 'Resource(key={resource_key:s}, value={resource_value:s})'
+_SUBSCRIPTION       = 'Subscription(key={subscr_key:s}, duration={subscr_duration:s}, interval={subscr_interval:s})'
+_SAMPLE_TYPE        = 'SampleType({sample_type_id:s}/{sample_type_name:s})'
+_ERROR              = 'Error({error:s})'
+
+ERROR_MISSING_DRIVER = _DEVICE_ID + ' has not been added to this Device instance'
+ERROR_MISSING_KPI    = _KPI + ' not found'
+
+ERROR_BAD_ENDPOINT   = _DEVICE_ID + ': GetConfig retrieved malformed ' + _ENDPOINT_DATA
+
+ERROR_GET            = _DEVICE_ID + ': Unable to Get ' + _RESOURCE_KEY + '; ' + _ERROR
+ERROR_GET_INIT       = _DEVICE_ID + ': Unable to Get Initial ' + _RESOURCE_KEY + '; ' + _ERROR
+ERROR_DELETE         = _DEVICE_ID + ': Unable to Delete ' + _RESOURCE_KEY_VALUE + '; ' + _ERROR
+ERROR_SET            = _DEVICE_ID + ': Unable to Set ' + _RESOURCE_KEY_VALUE + '; ' + _ERROR
+
+ERROR_SAMPLETYPE     = _DEVICE_ENDPOINT_ID + ': ' + _SAMPLE_TYPE + ' not supported'
+
+ERROR_SUBSCRIBE      = _DEVICE_ID + ': Unable to Subscribe ' + _SUBSCRIPTION + '; ' + _ERROR
+ERROR_UNSUBSCRIBE    = _DEVICE_ID + ': Unable to Unsubscribe ' + _SUBSCRIPTION + '; ' + _ERROR
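A quick illustration of why the new templates use named placeholders: fragments compose into larger templates, and call sites can pass arguments by name in any order (values below are illustrative):

```python
_DEVICE_ID    = 'DeviceId({device_uuid:s})'
_RESOURCE_KEY = 'Resource(key={resource_key:s})'
_ERROR        = 'Error({error:s})'
ERROR_GET     = _DEVICE_ID + ': Unable to Get ' + _RESOURCE_KEY + '; ' + _ERROR

print(ERROR_GET.format(error='timeout', device_uuid='dev-1', resource_key='/endpoints'))
# DeviceId(dev-1): Unable to Get Resource(key=/endpoints); Error(timeout)
```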
diff --git a/src/device/service/Errors.py b/src/device/service/Errors.py
deleted file mode 100644
index a29a70f05a79ba1517d0bd305dd94afa76703cac..0000000000000000000000000000000000000000
--- a/src/device/service/Errors.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-ERROR_MISSING_DRIVER = 'Device({:s}) has not been added to this Device instance'
-ERROR_MISSING_KPI    = 'Kpi({:s}) not found'
-
-ERROR_BAD_ENDPOINT   = 'Device({:s}): GetConfig retrieved malformed Endpoint({:s})'
-
-ERROR_GET            = 'Device({:s}): Unable to Get resource(key={:s}); error({:s})'
-ERROR_GET_INIT       = 'Device({:s}): Unable to Get Initial resource(key={:s}); error({:s})'
-ERROR_DELETE         = 'Device({:s}): Unable to Delete resource(key={:s}, value={:s}); error({:s})'
-ERROR_SET            = 'Device({:s}): Unable to Set resource(key={:s}, value={:s}); error({:s})'
-
-ERROR_SAMPLETYPE     = 'Device({:s})/EndPoint({:s}): SampleType({:s}/{:s}) not supported'
-
-ERROR_SUBSCRIBE      = 'Device({:s}): Unable to Subscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\
-    'error({:s})'
-ERROR_UNSUBSCRIBE    = 'Device({:s}): Unable to Unsubscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\
-    'error({:s})'
for (resource_key, duration, interval), result in zip(resources_to_subscribe, results_subscribestate): if isinstance(result, Exception): errors.append(ERROR_SUBSCRIBE.format( - str(device_uuid), str(resource_key), str(duration), str(interval), str(result) + device_uuid=str(device_uuid), subscr_key=str(resource_key), subscr_duration=str(duration), + subscr_interval=str(interval), error=str(result) )) continue @@ -253,7 +259,7 @@ def unsubscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_l kpi_details = monitoring_loops.get_kpi_by_uuid(kpi_uuid) if kpi_details is None: - return [ERROR_MISSING_KPI.format(str(kpi_uuid))] + return [ERROR_MISSING_KPI.format(kpi_uuid=str(kpi_uuid))] device_uuid, resource_key, sampling_duration, sampling_interval = kpi_details @@ -264,7 +270,9 @@ def unsubscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_l for (resource_key, duration, interval), result in zip(resources_to_unsubscribe, results_unsubscribestate): if isinstance(result, Exception): errors.append(ERROR_UNSUBSCRIBE.format( - device_uuid, str(resource_key), str(duration), str(interval), str(result))) + device_uuid=str(device_uuid), subscr_key=str(resource_key), subscr_duration=str(duration), + subscr_interval=str(interval), error=str(result) + )) continue monitoring_loops.remove_kpi(kpi_uuid) diff --git a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py index ea5cf2cb77e34fc4f9e88490ff400b92b1f64e66..5bbbf89e84e764677638b7e4e3f4934336321576 100644 --- a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py +++ b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py @@ -51,7 +51,7 @@ class SyntheticSamplingParameters: metric = match.group(2) metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '') - LOGGER.info(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense)) + LOGGER.debug(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense)) parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense) parameters = self.__data.get(parameters_key) diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index ef3d0728d5ed02ea4a15ba0c3ccd6f1428cab7df..a0c335193bdf91c71b2f24584f200361b0218310 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -38,8 +38,6 @@ logging.getLogger('apscheduler.executors.default').setLevel(logging.INFO if DEBU logging.getLogger('apscheduler.scheduler').setLevel(logging.INFO if DEBUG_MODE else logging.ERROR) logging.getLogger('monitoring-client').setLevel(logging.INFO if DEBUG_MODE else logging.ERROR) -LOGGER = logging.getLogger(__name__) - RE_GET_ENDPOINT_FROM_INTERFACE_KEY = re.compile(r'.*interface\[([^\]]+)\].*') RE_GET_ENDPOINT_FROM_INTERFACE_XPATH = re.compile(r".*interface\[oci\:name\='([^\]]+)'\].*") @@ -60,18 +58,18 @@ class NetconfSessionHandler: self.__connected = threading.Event() self.__address = address self.__port = int(port) - self.__username = settings.get('username') - self.__password = settings.get('password') - self.__vendor = settings.get('vendor') - self.__key_filename = settings.get('key_filename') - self.__hostkey_verify = settings.get('hostkey_verify', True) - self.__look_for_keys = settings.get('look_for_keys', True) - self.__allow_agent = settings.get('allow_agent', True) - 
self.__force_running = settings.get('force_running', False) - self.__commit_per_delete = settings.get('delete_rule', False) - self.__device_params = settings.get('device_params', {}) - self.__manager_params = settings.get('manager_params', {}) - self.__nc_params = settings.get('nc_params', {}) + self.__username = settings.get('username') + self.__password = settings.get('password') + self.__vendor = settings.get('vendor') + self.__key_filename = settings.get('key_filename') + self.__hostkey_verify = settings.get('hostkey_verify', True) + self.__look_for_keys = settings.get('look_for_keys', True) + self.__allow_agent = settings.get('allow_agent', True) + self.__force_running = settings.get('force_running', False) + self.__commit_per_rule = settings.get('commit_per_rule', False) + self.__device_params = settings.get('device_params', {}) + self.__manager_params = settings.get('manager_params', {}) + self.__nc_params = settings.get('nc_params', {}) self.__manager : Manager = None self.__candidate_supported = False @@ -94,7 +92,7 @@ class NetconfSessionHandler: def use_candidate(self): return self.__candidate_supported and not self.__force_running @property - def commit_per_rule(self): return self.__commit_per_delete + def commit_per_rule(self): return self.__commit_per_rule @property def vendor(self): return self.__vendor @@ -141,8 +139,9 @@ def compute_delta_sample(previous_sample, previous_timestamp, current_sample, cu return delta_sample class SamplesCache: - def __init__(self, netconf_handler : NetconfSessionHandler) -> None: + def __init__(self, netconf_handler : NetconfSessionHandler, logger : logging.Logger) -> None: self.__netconf_handler = netconf_handler + self.__logger = logger self.__lock = threading.Lock() self.__timestamp = None self.__absolute_samples = {} @@ -166,7 +165,7 @@ class SamplesCache: self.__absolute_samples[interface] = samples self.__timestamp = now except: # pylint: disable=bare-except - LOGGER.exception('Error collecting samples') + self.__logger.exception('Error collecting samples') def get(self, resource_key : str) -> Tuple[float, Dict]: self._refresh_samples() @@ -176,31 +175,33 @@ class SamplesCache: interface = match.group(1) return self.__timestamp, copy.deepcopy(self.__delta_samples.get(interface, {})) -def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples : queue.Queue) -> None: +def do_sampling( + samples_cache : SamplesCache, logger : logging.Logger, resource_key : str, out_samples : queue.Queue +) -> None: try: timestamp, samples = samples_cache.get(resource_key) counter_name = resource_key.split('/')[-1].split(':')[-1] value = samples.get(counter_name) if value is None: - LOGGER.warning('[do_sampling] value not found for {:s}'.format(resource_key)) + logger.warning('[do_sampling] value not found for {:s}'.format(resource_key)) return sample = (timestamp, resource_key, value) out_samples.put_nowait(sample) except: # pylint: disable=bare-except - LOGGER.exception('Error retrieving samples') + logger.exception('Error retrieving samples') def edit_config( - netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False, - target='running', default_operation='merge', test_option=None, error_option=None, + netconf_handler : NetconfSessionHandler, logger : logging.Logger, resources : List[Tuple[str, Any]], delete=False, + commit_per_rule=False, target='running', default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin ): str_method 
= 'DeleteConfig' if delete else 'SetConfig' - LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources))) + #logger.debug('[{:s}] resources = {:s}'.format(str_method, str(resources))) results = [None for _ in resources] for i,resource in enumerate(resources): str_resource_name = 'resources[#{:d}]'.format(i) try: - LOGGER.info('[{:s}] resource = {:s}'.format(str_method, str(resource))) + #logger.debug('[{:s}] resource = {:s}'.format(str_method, str(resource))) chk_type(str_resource_name, resource, (list, tuple)) chk_length(str_resource_name, resource, min_length=2, max_length=2) resource_key,resource_value = resource @@ -208,8 +209,8 @@ def edit_config( str_config_message = compose_config( resource_key, resource_value, delete=delete, vendor=netconf_handler.vendor) if str_config_message is None: raise UnsupportedResourceKeyException(resource_key) - LOGGER.info('[{:s}] str_config_message[{:d}] = {:s}'.format( - str_method, len(str_config_message), str(str_config_message))) + #logger.debug('[{:s}] str_config_message[{:d}] = {:s}'.format( + # str_method, len(str_config_message), str(str_config_message))) netconf_handler.edit_config( config=str_config_message, target=target, default_operation=default_operation, test_option=test_option, error_option=error_option, format=format) @@ -219,8 +220,16 @@ def edit_config( except Exception as e: # pylint: disable=broad-except str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting') msg = '[{:s}] Exception {:s} {:s}: {:s}' - LOGGER.exception(msg.format(str_method, str_operation, str_resource_name, str(resource))) + logger.exception(msg.format(str_method, str_operation, str_resource_name, str(resource))) results[i] = e # if validation fails, store the exception + + if not commit_per_rule: + try: + netconf_handler.commit() + except Exception as e: # pylint: disable=broad-except + msg = '[{:s}] Exception committing: {:s}' + logger.exception(msg.format(str_method, str(resources))) + results = [e for _ in resources] # if commit fails, set exception in each resource return results HISTOGRAM_BUCKETS = ( @@ -243,6 +252,7 @@ METRICS_POOL.get_or_create('UnsubscribeState', MetricTypeEnum.HISTOGRAM_DURATION class OpenConfigDriver(_Driver): def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called + self.__logger = logging.getLogger('{:s}:[{:s}:{:s}]'.format(str(__name__), str(address), str(port))) self.__lock = threading.Lock() #self.__initial = TreeNode('.') #self.__running = TreeNode('.') @@ -257,7 +267,7 @@ class OpenConfigDriver(_Driver): timezone=pytz.utc) self.__out_samples = queue.Queue() self.__netconf_handler : NetconfSessionHandler = NetconfSessionHandler(address, port, **settings) - self.__samples_cache = SamplesCache(self.__netconf_handler) + self.__samples_cache = SamplesCache(self.__netconf_handler, self.__logger) def Connect(self) -> bool: with self.__lock: @@ -295,13 +305,14 @@ class OpenConfigDriver(_Driver): try: chk_string(str_resource_name, resource_key, allow_empty=False) str_filter = get_filter(resource_key) - LOGGER.info('[GetConfig] str_filter = {:s}'.format(str(str_filter))) + #self.__logger.debug('[GetConfig] str_filter = {:s}'.format(str(str_filter))) if str_filter is None: str_filter = resource_key xml_data = self.__netconf_handler.get(filter=str_filter).data_ele if isinstance(xml_data, Exception): raise xml_data results.extend(parse(resource_key, xml_data)) except Exception as e: # pylint: disable=broad-except -
LOGGER.exception('Exception retrieving {:s}: {:s}'.format(str_resource_name, str(resource_key))) + MSG = 'Exception retrieving {:s}: {:s}' + self.__logger.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) # if validation fails, store the exception return results @@ -312,17 +323,11 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - if self.__netconf_handler.commit_per_rule: - results = edit_config(self.__netconf_handler, resources, target='candidate', commit_per_rule= True) - else: - results = edit_config(self.__netconf_handler, resources, target='candidate') - try: - self.__netconf_handler.commit() - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources))) - results = [e for _ in resources] # if commit fails, set exception in each resource + results = edit_config( + self.__netconf_handler, self.__logger, resources, target='candidate', + commit_per_rule=self.__netconf_handler.commit_per_rule) else: - results = edit_config(self.__netconf_handler, resources) + results = edit_config(self.__netconf_handler, self.__logger, resources) return results @metered_subclass_method(METRICS_POOL) @@ -332,17 +337,11 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - if self.__netconf_handler.commit_per_rule: - results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule= True) - else: - results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True) - try: - self.__netconf_handler.commit() - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources))) - results = [e for _ in resources] # if commit fails, set exception in each resource + results = edit_config( + self.__netconf_handler, self.__logger, resources, target='candidate', delete=True, + commit_per_rule=self.__netconf_handler.commit_per_rule) else: - results = edit_config(self.__netconf_handler, resources, delete=True) + results = edit_config(self.__netconf_handler, self.__logger, resources, delete=True) return results @metered_subclass_method(METRICS_POOL) @@ -363,7 +362,8 @@ class OpenConfigDriver(_Driver): chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0) chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key))) + MSG = 'Exception validating {:s}: {:s}' + self.__logger.exception(MSG.format(str_subscription_name, str(resource_key))) results.append(e) # if validation fails, store the exception continue @@ -374,7 +374,7 @@ class OpenConfigDriver(_Driver): job_id = 'k={:s}/d={:f}/i={:f}'.format(resource_key, sampling_duration, sampling_interval) job = self.__scheduler.add_job( - do_sampling, args=(self.__samples_cache, resource_key, self.__out_samples), + do_sampling, args=(self.__samples_cache, self.__logger, resource_key, self.__out_samples), kwargs={}, id=job_id, trigger='interval', seconds=sampling_interval, start_date=start_date, end_date=end_date, timezone=pytz.utc) @@ -401,7 +401,8 @@ class OpenConfigDriver(_Driver): 
chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0) chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key))) + MSG = 'Exception validating {:s}: {:s}' + self.__logger.exception(MSG.format(str_subscription_name, str(resource_key))) results.append(e) # if validation fails, store the exception continue diff --git a/src/device/service/drivers/openconfig/templates/Acl.py b/src/device/service/drivers/openconfig/templates/Acl.py index 6cd90f373427e2ab55550985929c4cfcd8798702..c316772a56fefc1a7a27eef526f8c4f5a2e0aa83 100644 --- a/src/device/service/drivers/openconfig/templates/Acl.py +++ b/src/device/service/drivers/openconfig/templates/Acl.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py index e831d7738b3a09ae99773e1b882650554cfe5d78..02fda8f0e195c267fddb1109f184c8a06e4a6787 100644 --- a/src/device/service/drivers/openconfig/templates/EndPoints.py +++ b/src/device/service/drivers/openconfig/templates/EndPoints.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/device/service/drivers/openconfig/templates/Interfaces.py b/src/device/service/drivers/openconfig/templates/Interfaces.py index 3f5b104f2de01137c2424e776dc60b8416088de6..fbe5cfd22eb29131a601aa360ca82ef88c144d8e 100644 --- a/src/device/service/drivers/openconfig/templates/Interfaces.py +++ b/src/device/service/drivers/openconfig/templates/Interfaces.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/device/service/drivers/openconfig/templates/Namespace.py b/src/device/service/drivers/openconfig/templates/Namespace.py index 94af95566f33751a25fb1cb1c817cbffa910eec4..eede865502b043b7936d763c980be80a7ea817f8 100644 --- a/src/device/service/drivers/openconfig/templates/Namespace.py +++ b/src/device/service/drivers/openconfig/templates/Namespace.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py index 8399402fa76b8b6b00829493cc8ebd28fd6018f4..a5ba0de23612b69ef5e3d33fa1a89573c7c63e97 100644 --- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py +++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py index 068ca5430d9135e784dbe9a07f80d81472cbf5cc..1c2efa6122b617243de26b009b0c890fad80cf19 100644 --- a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py +++ b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/device/service/drivers/openconfig/templates/Tools.py b/src/device/service/drivers/openconfig/templates/Tools.py index 67b6ee89fb051b35afee34d7b35057cce5239d96..67d267b7d8c0b773f818052e01c3f2720f071902 100644 --- a/src/device/service/drivers/openconfig/templates/Tools.py +++ b/src/device/service/drivers/openconfig/templates/Tools.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py index 5e77b25fe3206407db9427085de70b95342d370a..c415bfd25725ca950c018e9f0eedfcde6e0df379 100644 --- a/src/device/service/drivers/openconfig/templates/__init__.py +++ b/src/device/service/drivers/openconfig/templates/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml index 4bc53ff1ddfbebbdcef2a0b4c37770210726676b..220f062b5da09d26ff6ec271491d6d40cfd46669 100644 --- a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml @@ -4,6 +4,7 @@ {% if operation is defined and operation != 'delete' %} {{name}} + ianaift:{{type}} {{mtu}} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml index bf8c0c0770f9344fbed16f3a6b09f7fa99a978ef..855f321b4a69ba1e660487c108a05d0ec4b5d475 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml @@ -2,7 +2,7 @@ {{name}} - + {{id}} {{id}} diff --git a/src/device/service/drivers/xr/README_XR.md b/src/device/service/drivers/xr/README_XR.md index 3bfdf5b019b3c36e7ded09d58ac625a48add36a9..fa1bc944035d27769cd9c16e0c29318e554e9489 100644 --- a/src/device/service/drivers/xr/README_XR.md +++ b/src/device/service/drivers/xr/README_XR.md @@ -25,6 +25,19 @@ cd ~/.kube microk8s config > config ``` +Helm 3 is mandatory as of February 2023. Enable it with the MicroK8s command, then create a wrapper shell script to expose it under the standard name: + +``` +sudo su - +cat > /usr/bin/helm3 +#!/bin/sh +microk8s.helm3 "$@" +^D +chmod 755 /usr/bin/helm3 +``` + +Using a symbolic link does not work, because snap wraps the real binary and it fails if the name is different. + A local Docker registry is needed for the build results. Use the following command to start a local registry (Docker will pull the necessary images from the Internet): ```bash @@ -32,23 +45,33 @@ docker run -d -p 32000:5000 --restart=always --name registry registry:2 ``` Set up a mydeploy script outside the git repo; e.g., the following will do. SOURCE IT ON ALL SHELLS. - -IMPORTANT: September 2022 version of controller has a bug where any update to device trigger update to device -until GRPC endpoints are so loaded that K8s kills device service. XR does not need automation service, so it can -be left out. +Use https://labs.etsi.org/rep/tfs/controller/-/blob/develop/my_deploy.sh as an example. +The script requires more variables than before as of February 2023.
```bash +# See https://labs.etsi.org/rep/tfs/controller/-/blob/develop/my_deploy.sh +# Use docker run -d -p 32000:5000 --restart=always --name registry registry:2 export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" -# Without automation service (see note above) -export TFS_COMPONENTS="context device pathcomp service slice compute monitoring webui" -# Correct setting -# export TFS_COMPONENTS="context device automation pathcomp service slice compute monitoring webui" -# Pre-rebase -#export TFS_COMPONENTS="context device automation service compute monitoring webui" +export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator" export TFS_IMAGE_TAG="dev" export TFS_K8S_NAMESPACE="tfs" export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" export TFS_GRAFANA_PASSWORD="admin123+" +#export TFS_SKIP_BUILD="" +export CRDB_NAMESPACE="crdb" +export CRDB_USERNAME="tfs" +export CRDB_PASSWORD="tfs123" +export CRDB_DATABASE="tfs" +export CRDB_DEPLOY_MODE="single" +export CRDB_DROP_DATABASE_IF_EXISTS="" +export CRDB_REDEPLOY="" +export NATS_NAMESPACE="nats" +export NATS_REDEPLOY="" +export QDB_NAMESPACE="qdb" +export QDB_USERNAME="admin" +export QDB_PASSWORD="quest" +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" +export QDB_DROP_TABLES_IF_EXIST="" +export QDB_REDEPLOY="" ``` The build is containerized; the pytest used for setup is not. The TeraFlow docs suggest a third-party venv, but a standard venv works. Create: @@ -114,11 +137,44 @@ Setup service by following commands in src directory. Kubernetes endpoints change python -m pytest --verbose tests/ofc22/tests/test_functional_create_service_xr.py ``` +For a topology different from the one used by test_functional_create/delete_service_xr.py, one can also +use the service-cli.py tool in the xr module directory. It allows creating ELINE services between +arbitrary endpoints in the topology (with consequent underlying XR service instantiation). Run it in the +*xr module directory*. Representative examples: +``` + PYTHONPATH=../../../../ ./service-cli.py create 1 R1-EMU 13/1/2 500 2 R3-EMU 13/1/2 500 + PYTHONPATH=../../../../ ./service-cli.py list + PYTHONPATH=../../../../ ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f +``` + +It is also possible to create direct XR services without multi-layer services on top. E.g.: +``` + PYTHONPATH=../../../../ ./service-cli.py create-xr FooService X1-XR-CONSTELLATION "XR HUB 1|XR-T1" "XR LEAF 2|XR-T1" +``` + +Additionally, it is possible to list services and endpoints: +``` + PYTHONPATH=../../../../ ./service-cli.py list-endpoints + PYTHONPATH=../../../../ ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f +``` + +The PYTHONPATH is mandatory. A suitable topology JSON must have been loaded before. With the +CockroachDB persistence, it is sufficient to load the topology once and it will persist. + Good logs to check are: * kubectl logs service/deviceservice --namespace tfs * kubectl logs service/webuiservice --namespace tfs +The new 2.0 version of TeraFlow has a persistent database. To clean up any failed state +(e.g., from a debugging session), set this before deploying: + +``` +export CRDB_DROP_DATABASE_IF_EXISTS=YES +``` + +In normal test runs it is not necessary to clear the database. However, DO NOT RE-UPLOAD THE TOPOLOGY JSON FILE if the DB has not been cleared (see the sketch below).
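For reference, a complete clean-redeploy flow could look like the following sketch; the my_deploy.sh location and the ./deploy/all.sh entry point are assumptions, so adapt them to your checkout:

```bash
# Hypothetical clean-redeploy sequence; adjust the paths to your environment.
source my_deploy.sh                       # exports the TFS_*/CRDB_*/NATS_*/QDB_* variables shown above
export CRDB_DROP_DATABASE_IF_EXISTS=YES   # wipe the persistent CockroachDB database
./deploy/all.sh                           # rebuild the images and redeploy TFS
# The database is now empty; re-upload the topology JSON exactly once (e.g. via the WebUI).
```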
+ ## Unit Tests Run in src directory (src under repo top level) with command: diff --git a/src/device/service/drivers/xr/XrDriver.py b/src/device/service/drivers/xr/XrDriver.py index 565e3692feb88dd07779bb5f777b0061028f9776..605f4ce8d0f9c875a4b1736ff0aaa02fcb468778 100644 --- a/src/device/service/drivers/xr/XrDriver.py +++ b/src/device/service/drivers/xr/XrDriver.py @@ -106,8 +106,10 @@ class XrDriver(_Driver): def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: LOGGER.info(f"SetConfig[{self}]: {resources=}") # Logged config seems like: + # Pre-February 2023 #[('/service[52ff5f0f-fda4-40bd-a0b1-066f4ff04079:optical]', '{"capacity_unit": "GHz", "capacity_value": 1, "direction": "UNIDIRECTIONAL", "input_sip": "XR HUB 1|XR-T4", "layer_protocol_name": "PHOTONIC_MEDIA", "layer_protocol_qualifier": "tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC", "output_sip": "XR LEAF 1|XR-T1", "uuid": "52ff5f0f-fda4-40bd-a0b1-066f4ff04079:optical"}')] - + # Post February 2023 + #[('/services/service[e1b9184c-767d-44b9-bf83-a1f643d82bef]', '{"capacity_unit": "GHz", "capacity_value": 50.0, "direction": "UNIDIRECTIONAL", "input_sip": "XR LEAF 1|XR-T1", "layer_protocol_name": "PHOTONIC_MEDIA", "layer_protocol_qualifier": "tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC", "output_sip": "XR HUB 1|XR-T4", "uuid": "e1b9184c-767d-44b9-bf83-a1f643d82bef"}')] with self.__lock: if self.__constellation is None: self.__constellation = self.__cm_connection.get_constellation_by_hub_name(self.__hub_module_name) @@ -157,7 +159,7 @@ class XrDriver(_Driver): else: LOGGER.info(f"DeleteConfig: Connection {service_uuid} delete failure (was {str(connection)})") - if self.__constellation.is_vti_mode(): + if connection.is_vti_mode(): active_tc = self.__cm_connection.get_transport_capacity_by_teraflow_uuid(service_uuid) if active_tc is not None: if self.__cm_connection.delete_transport_capacity(active_tc.href): diff --git a/src/device/service/drivers/xr/cm-cli.py b/src/device/service/drivers/xr/cm-cli.py old mode 100644 new mode 100755 diff --git a/src/device/service/drivers/xr/cm/cm_connection.py b/src/device/service/drivers/xr/cm/cm_connection.py index 8ee9ee236c6bcfd504d4044dd023ef3a61fe4802..7128494510f40914917d2c3981158b6dd3571c70 100644 --- a/src/device/service/drivers/xr/cm/cm_connection.py +++ b/src/device/service/drivers/xr/cm/cm_connection.py @@ -241,7 +241,7 @@ class CmConnection: return self.__acquire_access_token() def list_constellations(self) -> List[Constellation]: - r = self.__get("/api/v1/ns/xr-networks?content=expanded") + r = self.__get("/api/v1/xr-networks?content=expanded") if not r.is_valid_json_list_with_status(200): return [] return [Constellation(c) for c in r.json] @@ -252,13 +252,13 @@ class CmConnection: ('content', 'expanded'), ('q', '{"hubModule.state.module.moduleName": "' + hub_module_name + '"}') ] - r = self.__get("/api/v1/ns/xr-networks?content=expanded", params=qparams) + r = self.__get("/api/v1/xr-networks?content=expanded", params=qparams) if not r.is_valid_json_list_with_status(200, 1, 1): return None return Constellation(r.json[0]) def get_transport_capacities(self) -> List[TransportCapacity]: - r= self.__get("/api/v1/ns/transport-capacities?content=expanded") + r= self.__get("/api/v1/transport-capacities?content=expanded") if not r.is_valid_json_list_with_status(200): return [] return [TransportCapacity(from_json=t) for t in r.json] @@ -268,7 +268,7 @@ class CmConnection: ('content', 'expanded'), ('q', '{"state.name": "' + tc_name + '"}') ] - r = 
self.__get("/api/v1/ns/transport-capacities?content=expanded", params=qparams) + r = self.__get("/api/v1/transport-capacities?content=expanded", params=qparams) if not r.is_valid_json_list_with_status(200, 1, 1): return TransportCapacity(from_json=r.json[0]) else: @@ -280,17 +280,17 @@ class CmConnection: def create_transport_capacity(self, tc: TransportCapacity) -> Optional[str]: # Create wants a list, so wrap connection to list tc_config = [tc.create_config()] - resp = self.__post("/api/v1/ns/transport-capacities", tc_config) + resp = self.__post("/api/v1/transport-capacities", tc_config) if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]: tc.href = resp.json[0]["href"] LOGGER.info(f"Created transport-capcity {tc}") - #LOGGER.info(self.__get(f"/api/v1/ns/transport-capacities{tc.href}?content=expanded")) + #LOGGER.info(self.__get(f"/api/v1/transport-capacities{tc.href}?content=expanded")) return tc.href else: return None def delete_transport_capacity(self, href: str) -> bool: - resp = self.__delete(f"/api/v1/ns/transport-capacities{href}") + resp = self.__delete(f"/api/v1/transport-capacities{href}") # Returns empty body if resp.is_valid_with_status_ignore_body(202): @@ -399,7 +399,7 @@ class CmConnection: # Create wants a list, so wrap connection to list cfg = [connection.create_config()] - resp = self.__post("/api/v1/ncs/network-connections", cfg) + resp = self.__post("/api/v1/network-connections", cfg) if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]: connection.href = resp.json[0]["href"] LOGGER.info(f"IPM accepted create request for connection {connection}") @@ -433,7 +433,7 @@ class CmConnection: # Perform deletes for ep_href in ep_deletes: - resp = self.__delete(f"/api/v1/ncs{ep_href}") + resp = self.__delete(f"/api/v1{ep_href}") if resp.is_valid_with_status_ignore_body(202): LOGGER.info(f"update_connection: EP-UPDATE: Deleted connection endpoint {ep_href}") else: @@ -441,21 +441,21 @@ class CmConnection: # Update capacities for otherwise similar endpoints for ep_href, ep_cfg in ep_updates: - resp = self.__put(f"/api/v1/ncs{ep_href}", ep_cfg) + resp = self.__put(f"/api/v1{ep_href}", ep_cfg) if resp.is_valid_with_status_ignore_body(202): LOGGER.info(f"update_connection: EP-UPDATE: Updated connection endpoint {ep_href} with {ep_cfg}") else: LOGGER.info(f"update_connection: EP-UPDATE: Failed to update connection endpoint {ep_href} with {ep_cfg}: {resp}") # Perform adds - resp = self.__post(f"/api/v1/ncs{href}/endpoints", ep_creates) + resp = self.__post(f"/api/v1{href}/endpoints", ep_creates) if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]: LOGGER.info(f"update_connection: EP-UPDATE: Created connection endpoints {resp.json[0]} with {ep_creates}") else: LOGGER.info(f"update_connection: EP-UPDATE: Failed to create connection endpoints {resp.json[0] if resp.json else None} with {ep_creates}: {resp}") # Connection update (excluding endpoints) - resp = self.__put(f"/api/v1/ncs{href}", cfg) + resp = self.__put(f"/api/v1{href}", cfg) # Returns empty body if resp.is_valid_with_status_ignore_body(202): LOGGER.info(f"update_connection: Updated connection {connection}") @@ -466,7 +466,7 @@ class CmConnection: return None def delete_connection(self, href: str) -> bool: - resp = self.__delete(f"/api/v1/ncs{href}") + resp = self.__delete(f"/api/v1{href}") #print(resp) # Returns empty body if resp.is_valid_with_status_ignore_body(202): @@ -489,7 +489,7 @@ class CmConnection: ('content', 'expanded'), ('q', 
'{"state.name": "' + connection_name + '"}') ] - r = self.__get("/api/v1/ncs/network-connections", params=qparams) + r = self.__get("/api/v1/network-connections", params=qparams) if r.is_valid_json_list_with_status(200, 1, 1): return Connection(from_json=r.json[0]) else: @@ -499,7 +499,7 @@ class CmConnection: qparams = [ ('content', 'expanded'), ] - r = self.__get(f"/api/v1/ncs{href}", params=qparams) + r = self.__get(f"/api/v1{href}", params=qparams) if r.is_valid_json_obj_with_status(200): return Connection(from_json=r.json) else: @@ -509,14 +509,14 @@ class CmConnection: return self.get_connection_by_name(f"TF:{uuid}") def get_connections(self): - r = self.__get("/api/v1/ncs/network-connections?content=expanded") + r = self.__get("/api/v1/network-connections?content=expanded") if r.is_valid_json_list_with_status(200): return [Connection(from_json=c) for c in r.json] else: return [] def service_uuid(self, key: str) -> Optional[str]: - service = re.match(r"^/service\[(.+)\]$", key) + service = re.match(r"^(?:/services)/service\[(.+)\]$", key) if service: return service.group(1) else: diff --git a/src/device/service/drivers/xr/cm/connection.py b/src/device/service/drivers/xr/cm/connection.py index 98736cce534685189069703d9560b9d34b1d8007..321922b1cb81eb1cedee673f40b232c038abd8af 100644 --- a/src/device/service/drivers/xr/cm/connection.py +++ b/src/device/service/drivers/xr/cm/connection.py @@ -165,6 +165,9 @@ class Connection: endpoints = ", ".join((str(ep) for ep in self.endpoints)) return f"name: {name}, id: {self.href}, service-mode: {self.serviceMode}, end-points: [{endpoints}]" + def is_vti_mode(self) -> bool: + return "XR-VTI-P2P" == self.serviceMode + def __guess_service_mode_from_emulated_enpoints(self): for ep in self.endpoints: if ep.vlan is not None: diff --git a/src/device/service/drivers/xr/cm/tests/test_cm_connection.py b/src/device/service/drivers/xr/cm/tests/test_cm_connection.py index 4f45be686c2c3a0f619d58230b2c52ed66a3eb6f..a7944ed220c6d68aad2f122a0bb0d2c1f83fdd06 100644 --- a/src/device/service/drivers/xr/cm/tests/test_cm_connection.py +++ b/src/device/service/drivers/xr/cm/tests/test_cm_connection.py @@ -59,7 +59,7 @@ def test_cmc_connect(): def test_cmc_get_constellations(): with mock_cm_connectivity() as m: - m.get("https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded", text=res_constellations) + m.get("https://127.0.0.1:9999/api/v1/xr-networks?content=expanded", text=res_constellations) cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) assert cm.Connect() @@ -73,7 +73,7 @@ def test_cmc_get_constellations(): ['XR HUB 1|XR-T1', 'XR HUB 1|XR-T2', 'XR HUB 1|XR-T3', 'XR HUB 1|XR-T4', 'XR LEAF 1|XR-T1', 'XR LEAF 2|XR-T1']] # Get constellation by hub module name - m.get("https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D", text=res_constellation_by_name_hub1) + m.get("https://127.0.0.1:9999/api/v1/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D", text=res_constellation_by_name_hub1) constellation = cm.get_constellation_by_hub_name("XR HUB 1") assert constellation assert constellation.ifnames() == ['XR HUB 1|XR-T1', 'XR HUB 1|XR-T2', 'XR HUB 1|XR-T3', 'XR HUB 1|XR-T4', 'XR LEAF 1|XR-T1', 'XR LEAF 2|XR-T1'] diff --git a/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py b/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py index 
3bfd63def82ae89f53ab6ec3a5fc18bd79ecd38f..e9b16b62034bcd42061907d920b757b59766f562 100644 --- a/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py +++ b/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py @@ -38,8 +38,8 @@ with open(os.path.join(resources, "connections-expanded.json"), "r", encoding="U def mock_cm(): m = requests_mock.Mocker() m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=access_token) - m.get("https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D", text=res_constellation_by_name_hub1) - m.post("https://127.0.0.1:9999/api/v1/ncs/network-connections", text='[{"href":"/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432","rt":["cm.network-connection"]}]', status_code=202) + m.get("https://127.0.0.1:9999/api/v1/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D", text=res_constellation_by_name_hub1) + m.post("https://127.0.0.1:9999/api/v1/network-connections", text='[{"href":"/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432","rt":["cm.network-connection"]}]', status_code=202) return m uuid = "12345ABCDEFGHIJKLMN" @@ -69,9 +69,9 @@ def test_xr_set_config(): called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history] expected_mocks = [ ('POST', 'https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token'), # Authentication - ('GET', 'https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create - ('POST', 'https://127.0.0.1:9999/api/v1/ncs/network-connections') # Create + ('GET', 'https://127.0.0.1:9999/api/v1/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create + ('POST', 'https://127.0.0.1:9999/api/v1/network-connections') # Create ] assert called_mocks == expected_mocks @@ -97,7 +97,7 @@ def test_xr_set_config_consistency_lifecycle(): json_non_terminal = copy.deepcopy(json_terminal) json_non_terminal["state"]["lifecycleState"] = "pendingConfiguration" # We go through 404 and a non-terminal lifecycle state first, and then the terminal state.
- m.get("https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", + m.get("https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", [{'text': '', 'status_code': 404}, { 'json': json_non_terminal, 'status_code': 200 }, {'json': json_terminal, 'status_code': 200 }]) @@ -108,19 +108,19 @@ def test_xr_set_config_consistency_lifecycle(): called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history] expected_mocks = [ ('POST', 'https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token'), # Authentication - ('GET', 'https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create - ('POST', 'https://127.0.0.1:9999/api/v1/ncs/network-connections'), # Create - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> no REST API object - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> non-terminal - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded') # Life cycle state check --> terminal + ('GET', 'https://127.0.0.1:9999/api/v1/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create + ('POST', 'https://127.0.0.1:9999/api/v1/network-connections'), # Create + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> no REST API object + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> non-terminal + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded') # Life cycle state check --> terminal ] assert called_mocks == expected_mocks ################################################################################ # Same as before, but without life cycle progress m.reset_mock() - m.get("https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", + m.get("https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", [{'text': '', 'status_code': 401}, { 'json': json_non_terminal, 'status_code': 200 }]) @@ -129,10 +129,10 @@ def test_xr_set_config_consistency_lifecycle(): called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history] expected_mocks_no_connect = [ - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create - ('POST', 'https://127.0.0.1:9999/api/v1/ncs/network-connections'), # Create - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> no REST API object 
- ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> non-terminal + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create + ('POST', 'https://127.0.0.1:9999/api/v1/network-connections'), # Create + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> no REST API object + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432?content=expanded'), # Life cycle state check --> non-terminal ] assert called_mocks == repeat_last_expected(expected_mocks_no_connect, called_mocks) @@ -143,7 +143,7 @@ def test_xr_set_config_consistency_lifecycle(): assert cm.Connect() constellation = cm.get_constellation_by_hub_name("XR HUB 1") assert constellation - m.get("https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", + m.get("https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", [{'text': '', 'status_code': 401}, { 'json': json_non_terminal, 'status_code': 200 }]) result = set_config_for_service(cm, constellation, uuid, config) @@ -158,7 +158,7 @@ def test_xr_set_config_consistency_lifecycle(): assert cm.Connect() constellation = cm.get_constellation_by_hub_name("XR HUB 1") assert constellation - m.get("https://127.0.0.1:9999/api/v1/ncs/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", + m.get("https://127.0.0.1:9999/api/v1/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432", [{'text': '', 'status_code': 401}]) result = set_config_for_service(cm, constellation, uuid, config) _validate_result(result, False) @@ -175,15 +175,15 @@ def test_xr_set_config_update_case(): assert constellation # Fake existing service (--> update path is taken) - m.get("https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D", json=res_connection_by_name_json) + m.get("https://127.0.0.1:9999/api/v1/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D", json=res_connection_by_name_json) # Delete endpoint that is no longer necessary - m.delete("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388", text="", status_code = 202) + m.delete("https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388", text="", status_code = 202) # Update changed endpoint - m.put("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6", text="", status_code = 202) + m.put("https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6", text="", status_code = 202) # Create the newly added endpoint - m.post("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints", json=[{"href":"/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoint/somethingplausible","rt":["plausible"]}], status_code=202) + m.post("https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints", 
json=[{"href":"/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoint/somethingplausible","rt":["plausible"]}], status_code=202) # Update the connection itself - m.put("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03", text="", status_code=202) + m.put("https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03", text="", status_code=202) result = set_config_for_service(cm, constellation, uuid, config) _validate_result(result, True) @@ -191,11 +191,11 @@ def test_xr_set_config_update_case(): called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history] expected_mocks = [ ('POST', 'https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token'), # Authentication - ('GET', 'https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name - ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create - ('DELETE', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388'), # Delete unnecessary endpoint - ('PUT', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6'), # Update changed endpoint - ('POST', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints'), # Add new endpoint - ('PUT', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03') # Update the connection itself + ('GET', 'https://127.0.0.1:9999/api/v1/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name + ('GET', 'https://127.0.0.1:9999/api/v1/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create + ('DELETE', 'https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388'), # Delete unnecessary endpoint + ('PUT', 'https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6'), # Update changed endpoint + ('POST', 'https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints'), # Add new endpoint + ('PUT', 'https://127.0.0.1:9999/api/v1/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03') # Update the connection itself ] assert called_mocks == expected_mocks diff --git a/src/device/service/drivers/xr/service-cli.py b/src/device/service/drivers/xr/service-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..7ab9606cef7bd7d3cca4f414cbd704ab150c8f52 --- /dev/null +++ b/src/device/service/drivers/xr/service-cli.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python3 +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Manage L2 services (with underlying XR connectivity) without the need to use unit test +# files or excessive JSON definitions +# +# Run in this directory with PYTHONPATH=../../../../ +# E.g.: +# Create multi-layer service (L2 VPN over XR): +# PYTHONPATH=../../../../ ./service-cli.py create 1 R1-EMU 13/1/2 500 2 R3-EMU 13/1/2 500 +# Single-layer (XR without services on top of it): +# PYTHONPATH=../../../../ ./service-cli.py create-xr FooService X1-XR-CONSTELLATION "XR HUB 1|XR-T1" "XR LEAF 2|XR-T1" +# List services: +# PYTHONPATH=../../../../ ./service-cli.py list +# List possible endpoints: +# PYTHONPATH=../../../../ ./service-cli.py list-endpoints +# Delete service (if multi-layer, always delete the highest layer!) +# PYTHONPATH=../../../../ ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f + +import argparse +import logging +from copy import deepcopy +from dataclasses import dataclass, field +from typing import Dict +from contextlib import contextmanager + +from common.Settings import get_setting +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.tools.mock_osm.MockOSM import MockOSM +from common.proto.context_pb2 import ContextId, ServiceTypeEnum, ServiceStatusEnum, Service, Empty, ServiceId +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Topology import json_topology_id +from common.tools.object_factory.ConfigRule import json_config_rule_set + +LOGGER = logging.getLogger(__name__) + +WIM_USERNAME = 'admin' +WIM_PASSWORD = 'admin' + +@contextmanager +def make_context_client(): + try: + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + finally: + _client.close() + +@contextmanager +def make_service_client(): + try: + _client = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + finally: + _client.close() + +def make_osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) + +@dataclass +class DevInfo: + name: str + uuid: str + endpoints: Dict[str, str] = field(default_factory= dict) + endpoints_by_uuid: Dict[str, str] = field(default_factory= dict) + + def get_endpoint_uuid_or_exit(self, ep_name: str) -> str: + if ep_name not in self.endpoints: + print(f"Endpoint {ep_name} does not exist in device {self.name}. 
See \"service-cli.py list-endpoints\"") + exit(-1) + return self.endpoints[ep_name] + +def get_devices(cc: ContextClient) -> Dict[str, DevInfo]: + r = cc.ListDevices(Empty()) + # print(grpc_message_to_json_string(r)) + + devices = dict() + for dev in r.devices: + di = DevInfo(dev.name, dev.device_id.device_uuid.uuid) + for ep in dev.device_endpoints: + di.endpoints[ep.name] = ep.endpoint_id.endpoint_uuid.uuid + di.endpoints_by_uuid[ep.endpoint_id.endpoint_uuid.uuid] = ep.name + devices[dev.name] = di + return devices + +def get_endpoint_map(devices: Dict[str, DevInfo]): + ep_map = dict() + for dev in devices.values(): + for ep_name, ep_uuid in dev.endpoints.items(): + ep_map[ep_uuid] = (dev.name, ep_name) + return ep_map + +logging.basicConfig(level=logging.ERROR) + +parser = argparse.ArgumentParser(description='TF Service Management Utility') +subparsers = parser.add_subparsers(dest="command") +subparsers.required = True + +create_parser = subparsers.add_parser('create') +create_parser.add_argument('site1', type=int, help='One endpoint of the service, e.g. 1') +create_parser.add_argument('device1', type=str, help='One endpoint of the service, e.g. R1-EMU') +create_parser.add_argument('interface1', type=str, help='One endpoint of the service, e.g. 13/1/2') +create_parser.add_argument('vlan1', type=int, help='VLAN in first endpoint, e.g. 500') + +create_parser.add_argument('site2', type=int, help='One endpoint of the service, e.g. 2') +create_parser.add_argument('device2', type=str, help='One endpoint of the service, e.g. R3-EMU') +create_parser.add_argument('interface2', type=str, help='One endpoint of the service, e.g. 13/1/2') +create_parser.add_argument('vlan2', type=int, help='VLAN in first endpoint, e.g. 500') + +delete_parser = subparsers.add_parser('delete') +delete_parser.add_argument('service_uuid', type=str, help='UUID of the service to be deleted') + +list_parser = subparsers.add_parser('list') +list_parser = subparsers.add_parser('list-endpoints') + +create_xr_parser = subparsers.add_parser('create-xr') +create_xr_parser.add_argument('service_name', type=str, help='Service Name') +create_xr_parser.add_argument('constellation', type=str, help='XR Constellation') +create_xr_parser.add_argument('interface1', type=str, help='One endpoint of the service') +create_xr_parser.add_argument('interface2', type=str, help='Second endpoint of the service') + +args = parser.parse_args() + +WIM_SERVICE_TYPE = 'ELINE' +CONTEXT_ID = {'context_uuid': {'uuid': 'admin'}} + +if args.command == "create": + endpoint1 = f"{args.device1}:{args.interface1}" + endpoint2 = f"{args.device2}:{args.interface2}" + + WIM_MAPPING = [ + {'device-id': args.device1, 'service_endpoint_id': endpoint1, + 'service_mapping_info': {'bearer': {'bearer-reference': endpoint1}, 'site-id': args.site1}}, + {'device-id': args.device2, 'service_endpoint_id': endpoint2, + 'service_mapping_info': {'bearer': {'bearer-reference': endpoint2}, 'site-id': args.site2}}, + ] + WIM_SERVICE_CONNECTION_POINTS = [ + {'service_endpoint_id': endpoint1, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': args.vlan1}}, + {'service_endpoint_id': endpoint2, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': args.vlan2}}, + ] +else: + WIM_MAPPING = [] + WIM_SERVICE_CONNECTION_POINTS = [] + +#print(str(args)) +#print(f"=== WIM_SERVICE_TYPE: {WIM_SERVICE_TYPE}") +#print(f"=== WIM_SERVICE_CONNECTION_POINTS: {WIM_SERVICE_CONNECTION_POINTS}") +#print(f"=== 
WIM_MAPPING: {WIM_MAPPING}") + +with make_context_client() as client: + # We only permit one context on our demos/testing + response = client.ListContextIds(Empty()) + assert len(response.context_ids) == 1 + context_uuid=json_context_id(response.context_ids[0].context_uuid.uuid) + + osm_wim = make_osm_wim() + + if args.command == "create": + service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) + print(f"*** Create connectivity service --> {service_uuid}") + status = osm_wim.get_connectivity_service_status(service_uuid) + print(f"*** Get created service status --> {str(status)}") + + elif args.command == "delete": + service_id = { + "context_id": context_uuid, + "service_uuid": { + "uuid": args.service_uuid + } + } + + try: + response = client.GetService(ServiceId(**service_id)) + #print(grpc_message_to_json_string(response)) + + high_level_delete = response.service_type == ServiceTypeEnum.SERVICETYPE_L2NM or response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM + print(f"Deleting service {response.name}, type {ServiceTypeEnum.Name(response.service_type)}, {high_level_delete=}") + + except: + print(f"No service with uuid {args.service_uuid} ({service_id})") + exit(-1) + + if high_level_delete: + osm_wim.wim.check_credentials() + try: + osm_wim.wim.delete_connectivity_service(args.service_uuid) + print(f"*** Service {args.service_uuid} deleted (L2SM/L3SM layer)") + except Exception as e: + print(f"*** Failed to delete service {args.service_uuid}, {e}") + else: + with make_service_client() as service_client: + try: + service_client.DeleteService(ServiceId(**service_id)) + print(f"*** Service {args.service_uuid} deleted (low level)") + except Exception as e: + print(f"*** Failed to delete service {args.service_uuid}, {e}") + + elif args.command == "create-xr": + CONTEXT_NAME = 'admin' + CONTEXT_ID = json_context_id(CONTEXT_NAME) + CONTEXT = json_context(CONTEXT_NAME, name=CONTEXT_NAME) + + json_tapi_settings = { + 'capacity_value' : 50.0, + 'capacity_unit' : 'GHz', + 'layer_proto_name': 'PHOTONIC_MEDIA', + 'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC', + 'direction' : 'UNIDIRECTIONAL', + } + config_rule = json_config_rule_set('/settings', json_tapi_settings) + + devices = get_devices(client) + if args.constellation not in devices: + print(f"Constellation {args.constellation} does not exist as a device. 
See \"service-cli.py list-endpoints\"") + exit(-1) + else: + dev_info = devices[args.constellation] + constellation_uuid = dev_info.uuid + + interface1_uuid = dev_info.get_endpoint_uuid_or_exit(args.interface1) + interface2_uuid = dev_info.get_endpoint_uuid_or_exit(args.interface2) + + print(f"Constellation {args.constellation:40}: {constellation_uuid:36}") + print(f"Interface 1 {args.interface1:40}: {interface1_uuid:36}") + print(f"Interface 2 {args.interface2:40}: {interface2_uuid:36}") + + service_request = { + "name": args.service_name, + "service_id": { + "context_id": {"context_uuid": {"uuid": response.context_ids[0].context_uuid.uuid}}, + "service_uuid": {"uuid": args.service_name} + }, + 'service_type' : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, + "service_endpoint_ids": [ + {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': interface1_uuid}, 'topology_id': json_topology_id("admin", context_id=context_uuid)}, + {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': interface2_uuid}, 'topology_id': json_topology_id("admin", context_id=context_uuid)} + ], + 'service_status' : {'service_status': ServiceStatusEnum.SERVICESTATUS_PLANNED}, + 'service_constraints' : [], + } + + with make_service_client() as service_client: + sr = deepcopy(service_request) + endpoints, sr['service_endpoint_ids'] = sr['service_endpoint_ids'], [] + create_response = service_client.CreateService(Service(**sr)) + print(f'CreateService: {grpc_message_to_json_string(create_response)}') + + sr['service_endpoint_ids'] = endpoints + #sr['service_id']['service_uuid'] = create_response + sr['service_config'] = {'config_rules': [config_rule]} + + update_response = service_client.UpdateService(Service(**sr)) + print(f'UpdateService: {grpc_message_to_json_string(update_response)}') + + elif args.command == "list": + devices = get_devices(client) + ep_map = get_endpoint_map(devices) + + response = client.ListServices(ContextId(**CONTEXT_ID)) + + # print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + for service in response.services: + scs = "" + + ep_list = [] + for ep in service.service_endpoint_ids: + ep_uuid = ep.endpoint_uuid.uuid + if ep_uuid in ep_map: + dev_name, ep_name = ep_map[ep_uuid] + ep_list.append(f"{dev_name}:{ep_name}") + ep_list.sort() + eps = ", ".join(ep_list) + + #print(f"{service.service_id.service_uuid.uuid:36} {ServiceTypeEnum.Name(service.service_type):40} {service.name:40} {ServiceStatusEnum.Name(service.service_status.service_status)} {scs}") + print(f"{service.service_id.service_uuid.uuid:36} {ServiceTypeEnum.Name(service.service_type):40} {service.name:40} {ServiceStatusEnum.Name(service.service_status.service_status):28} {eps}") + + elif args.command == "list-endpoints": + devices = get_devices(client) + for name in sorted(devices.keys()): + dev = devices[name] + print(f"{name:40} {dev.uuid:36}") + for ep_name in sorted(dev.endpoints.keys()): + print(f" {ep_name:40} {dev.endpoints[ep_name]:36}") diff --git a/src/device/service/drivers/xr/setup_test_env.sh b/src/device/service/drivers/xr/setup_test_env.sh index 92ff4a0312fb8f963f934f4cfd8d18603675aed0..bd5463cd4f9d08c903fc601cfcb7241b672e7681 100755 --- a/src/device/service/drivers/xr/setup_test_env.sh +++ b/src/device/service/drivers/xr/setup_test_env.sh @@ -17,7 +17,11 @@ export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get service/contextservice --namesp export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get 
service/contextservice --namespace tfs -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}') export COMPUTESERVICE_SERVICE_HOST=$(kubectl get service/computeservice --namespace tfs --template '{{.spec.clusterIP}}') export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service/computeservice --namespace tfs -o jsonpath='{.spec.ports[?(@.name=="http")].port}') +export SERVICESERVICE_SERVICE_HOST=$(kubectl get service/serviceservice --namespace tfs --template '{{.spec.clusterIP}}') +export SERVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service/serviceservice --namespace tfs -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}') echo "CONTEXTSERVICE_SERVICE_HOST=$CONTEXTSERVICE_SERVICE_HOST" echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=$CONTEXTSERVICE_SERVICE_PORT_GRPC" echo "COMPUTESERVICE_SERVICE_HOST=$COMPUTESERVICE_SERVICE_HOST" echo "COMPUTESERVICE_SERVICE_PORT_HTTP=$COMPUTESERVICE_SERVICE_PORT_HTTP" +echo "SERVICESERVICE_SERVICE_HOST=$SERVICESERVICE_SERVICE_HOST" +echo "SERVICESERVICE_SERVICE_PORT_GRPC=$SERVICESERVICE_SERVICE_PORT_GRPC" diff --git a/src/device/tests/Device_OpenConfig_Template.py b/src/device/tests/Device_OpenConfig_Template.py index 8ab45337514bf354d8b338c8bb97721d099355f4..b9aae79a2b0e5a38a556e50dd2445592caca4daf 100644 --- a/src/device/tests/Device_OpenConfig_Template.py +++ b/src/device/tests/Device_OpenConfig_Template.py @@ -32,7 +32,7 @@ DEVICE_OC_CONNECT_RULES = json_device_connect_rules(DEVICE_OC_ADDRESS, DEVICE_OC 'hostkey_verify' : True, 'look_for_keys' : True, 'allow_agent' : True, - 'delete_rule' : False, + 'commit_per_rule': False, 'device_params' : {'name': 'default'}, 'manager_params' : {'timeout' : DEVICE_OC_TIMEOUT}, }) diff --git a/src/dlt/gateway/Dockerfile b/src/dlt/gateway/Dockerfile index 92ef8e425f40eaf718c4562c836517128dbb2d6f..351f21c9361a5a1313a6d857b41acd4155afc0cd 100644 --- a/src/dlt/gateway/Dockerfile +++ b/src/dlt/gateway/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/dlt/gateway/settings.gradle.kts b/src/dlt/gateway/settings.gradle.kts index 9c09bb933a23509312b8dfac226caed41f55b053..77fa0f0b22918cf306f0e5f07506a35e492142b4 100644 --- a/src/dlt/gateway/settings.gradle.kts +++ b/src/dlt/gateway/settings.gradle.kts @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) + * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
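
Note: taken together, the service-cli.py helpers above give a compact way to translate between human-readable device/endpoint names and the UUIDs the gRPC API expects. A minimal usage sketch, assuming the imports at the top of service-cli.py (not part of this excerpt), with illustrative device and endpoint names:

    # Hypothetical snippet; 'R1-EMU' and '13/1/2' are illustrative, taken from the CLI help texts.
    with make_context_client() as client:
        devices = get_devices(client)        # device name -> DevInfo
        ep_map = get_endpoint_map(devices)   # endpoint UUID -> (device name, endpoint name)
        ep_uuid = devices['R1-EMU'].get_endpoint_uuid_or_exit('13/1/2')
        print(ep_uuid, '->', ep_map[ep_uuid])  # round-trips back to ('R1-EMU', '13/1/2')
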
diff --git a/src/load_generator/client/LoadGeneratorClient.py b/src/load_generator/client/LoadGeneratorClient.py
index 99626bbbb59671af41c11054d34338194f42a6af..2bed40dfdfe13d2920166bcb56237fe84bff8789 100644
--- a/src/load_generator/client/LoadGeneratorClient.py
+++ b/src/load_generator/client/LoadGeneratorClient.py
@@ -16,6 +16,7 @@ import grpc, logging
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_host, get_service_port_grpc
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, Status
 from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -46,12 +47,19 @@ class LoadGeneratorClient:
         self.stub = None

     @RETRY_DECORATOR
-    def Start(self, request : Empty) -> Empty:
+    def Start(self, request : Parameters) -> Empty:
         LOGGER.debug('Start request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.Start(request)
         LOGGER.debug('Start result: {:s}'.format(grpc_message_to_json_string(response)))
         return response

+    @RETRY_DECORATOR
+    def GetStatus(self, request : Empty) -> Status:
+        LOGGER.debug('GetStatus request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetStatus(request)
+        LOGGER.debug('GetStatus result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def Stop(self, request : Empty) -> Empty:
         LOGGER.debug('Stop request: {:s}'.format(grpc_message_to_json_string(request)))
diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
index b71dd9a35329e2aef6ce64739f59103a656b4de3..9ae3cdc1216891ca4dfcf01c1bd49d27bf4ef6f6 100644
--- a/src/load_generator/load_gen/Constants.py
+++ b/src/load_generator/load_gen/Constants.py
@@ -26,3 +26,5 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT': 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT',
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT' : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
+
+MAX_WORKER_THREADS = 10
\ No newline at end of file
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index 906c26e98a75fe3c8f15d628f863faac4ba2ea16..7e0acbe76371628f579cfcf2ebe702ce6170a8a7 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -14,9 +14,11 @@
 import logging, json, random, threading
 from typing import Dict, Optional, Set, Tuple
-from common.proto.context_pb2 import Empty, TopologyId
+from common.proto.context_pb2 import Empty, IsolationLevelEnum, TopologyId
 from common.tools.grpc.Tools import grpc_message_to_json
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import (
+    json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_isolation,
+    json_constraint_sla_latency)
 from common.tools.object_factory.ConfigRule import json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
@@ -32,11 +34,21 @@ from .Parameters import Parameters

 LOGGER = logging.getLogger(__name__)

+ROUTER_ID = {
+    'R149': '5.5.5.5',
+    'R155': '5.5.5.1',
+}
+
+VIRTUAL_CIRCUIT = {
+    'R149': '5.5.5.5',
+    'R155': '5.5.5.1',
+}
+
 class RequestGenerator:
     def __init__(self, parameters : Parameters) -> None:
         self._parameters = parameters
         self._lock = threading.Lock()
-        self._num_requests = 0
+        self._num_generated = 0
         self._available_device_endpoints : Dict[str, Set[str]] = dict()
         self._used_device_endpoints : Dict[str, Dict[str, str]] = dict()
         self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict()
@@ -45,6 +57,12 @@ class RequestGenerator:
         self._device_data : Dict[str, Dict] = dict()
         self._device_endpoint_data : Dict[str, Dict[str, Dict]] = dict()

+    @property
+    def num_generated(self): return self._num_generated
+
+    @property
+    def infinite_loop(self): return self._parameters.num_requests == 0
+
     def initialize(self) -> None:
         with self._lock:
             self._available_device_endpoints.clear()
@@ -96,17 +114,14 @@ class RequestGenerator:
             if self._parameters.record_to_dlt:
                 record_link_to_dlt(dlt_connector_client, dlt_domain_id, link.link_id)

-    @property
-    def num_requests_generated(self): return self._num_requests
-
     def dump_state(self) -> None:
         with self._lock:
             _endpoints = {
                 device_uuid:[endpoint_uuid for endpoint_uuid in endpoint_uuids]
                 for device_uuid,endpoint_uuids in self._available_device_endpoints.items()
             }
-            LOGGER.info('[dump_state] available_device_endpoints = {:s}'.format(json.dumps(_endpoints)))
-            LOGGER.info('[dump_state] used_device_endpoints = {:s}'.format(json.dumps(self._used_device_endpoints)))
+            LOGGER.debug('[dump_state] available_device_endpoints = {:s}'.format(json.dumps(_endpoints)))
+            LOGGER.debug('[dump_state] used_device_endpoints = {:s}'.format(json.dumps(self._used_device_endpoints)))

     def _use_device_endpoint(
         self, service_uuid : str, request_type : RequestType, endpoint_types : Optional[Set[str]] = None,
@@ -167,10 +182,13 @@ class RequestGenerator:
             self._used_device_endpoints.setdefault(device_uuid, dict()).pop(endpoint_uuid, None)
             self._available_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid)

-    def compose_request(self) -> Optional[Dict]:
+    def compose_request(self) -> Tuple[bool, Optional[Dict]]: # completed, request
         with self._lock:
-            self._num_requests += 1
-            num_request = self._num_requests
+            if not self.infinite_loop and (self._num_generated >= self._parameters.num_requests):
+                LOGGER.info('Generation Done!')
+                return True, None # completed
+            self._num_generated += 1
+            num_request = self._num_generated

             #request_uuid = str(uuid.uuid4())
             request_uuid = 'svc_{:d}'.format(num_request)
@@ -181,9 +199,9 @@ class RequestGenerator:
         if request_type in {
             RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, RequestType.SERVICE_TAPI, RequestType.SERVICE_MW
         }:
-            return self._compose_service(num_request, request_uuid, request_type)
+            return False, self._compose_service(num_request, request_uuid, request_type)
         elif request_type in {RequestType.SLICE_L2NM, RequestType.SLICE_L3NM}:
-            return self._compose_slice(num_request, request_uuid, request_type)
+            return False, self._compose_slice(num_request, request_uuid, request_type)

     def _compose_service(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
         # choose source endpoint
@@ -222,86 +240,110 @@ class RequestGenerator:
         ]

         if request_type == RequestType.SERVICE_L2NM:
+            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+
             constraints = [
-                json_constraint_custom('bandwidth[gbps]', '10.0'),
-                json_constraint_custom('latency[ms]', '20.0'),
+                json_constraint_sla_availability(1, True, availability),
+                json_constraint_sla_capacity(capacity_gbps),
+                json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]),
+                json_constraint_sla_latency(e2e_latency_ms),
             ]
+
             vlan_id = num_request % 1000
-            circuit_id = '{:03d}'.format(vlan_id)
+            circuit_id = '{:03d}'.format(vlan_id + 100)

             src_device_name = self._device_data[src_device_uuid]['name']
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
+            src_router_id = ROUTER_ID.get(src_device_name, '10.0.0.{:d}'.format(int(src_device_name.replace('R', ''))))

             dst_device_name = self._device_data[dst_device_uuid]['name']
-            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
+            dst_router_id = ROUTER_ID.get(dst_device_name, '10.0.0.{:d}'.format(int(dst_device_name.replace('R', ''))))

             config_rules = [
                 json_config_rule_set('/settings', {
                     'mtu': 1512
                 }),
                 json_config_rule_set(
-                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
-                        'router_id': src_router_id,
-                        'sub_interface_index': vlan_id,
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_name, src_endpoint_name), {
+                        'sub_interface_index': 0,
                         'vlan_id': vlan_id,
                         'remote_router': dst_router_id,
                         'circuit_id': circuit_id,
-                }),
+                    }),
                 json_config_rule_set(
-                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
-                        'router_id': dst_router_id,
-                        'sub_interface_index': vlan_id,
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_name, dst_endpoint_name), {
+                        'sub_interface_index': 0,
                         'vlan_id': vlan_id,
                         'remote_router': src_router_id,
                         'circuit_id': circuit_id,
-                }),
+                    }),
             ]
             return json_service_l2nm_planned(
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)

         elif request_type == RequestType.SERVICE_L3NM:
+            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+
             constraints = [
-                json_constraint_custom('bandwidth[gbps]', '10.0'),
-                json_constraint_custom('latency[ms]', '20.0'),
+                json_constraint_sla_availability(1, True, availability),
+                json_constraint_sla_capacity(capacity_gbps),
+                json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]),
+                json_constraint_sla_latency(e2e_latency_ms),
             ]
-            vlan_id = num_request % 1000
-            bgp_as = 60000 + (num_request % 10000)
-            bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
+
+            bgp_as = 65000 + (num_request % 10000)
+
+            vlan_id = num_request % 100 + 100
+            x = num_request % 255
+            y = num_request % 25 * num_request % 10
             route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)

             src_device_name = self._device_data[src_device_uuid]['name']
             src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
-            src_address_ip = '.'.join([src_device_name.replace('R', ''), '0'] + src_endpoint_name.split('/'))
+            src_router_id = ROUTER_ID.get(src_device_name)
+            src_router_num = int(src_device_name.replace('R', ''))
+            if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
+            src_address_ip = '10.{:d}.{:d}.{:d}'.format(x, y, src_router_num)

             dst_device_name = self._device_data[dst_device_uuid]['name']
             dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name']
-            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
-            dst_address_ip = '.'.join([dst_device_name.replace('R', ''), '0'] + dst_endpoint_name.split('/'))
+            dst_router_num = int(dst_device_name.replace('R', ''))
+            dst_router_id = ROUTER_ID.get(dst_device_name)
+            if dst_router_id is None: dst_router_id = '10.0.0.{:d}'.format(dst_router_num)
+            dst_address_ip = '10.{:d}.{:d}.{:d}'.format(y, x, dst_router_num)
+
+            policy_AZ = 'srv_{:d}_a'.format(vlan_id)
+            policy_ZA = 'srv_{:d}_b'.format(vlan_id)

             config_rules = [
                 json_config_rule_set('/settings', {
-                    'mtu'             : 1512,
                     'bgp_as'          : bgp_as,
-                    'bgp_route_target': bgp_route_target,
+                    'route_distinguisher': route_distinguisher,
                 }),
                 json_config_rule_set(
-                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_name, src_endpoint_name), {
                         'router_id'          : src_router_id,
                         'route_distinguisher': route_distinguisher,
                         'sub_interface_index': vlan_id,
                         'vlan_id'            : vlan_id,
                         'address_ip'         : src_address_ip,
                         'address_prefix'     : 16,
+                        'policy_AZ'          : policy_AZ,
+                        'policy_ZA'          : policy_ZA,
                     }),
                 json_config_rule_set(
-                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_name, dst_endpoint_name), {
                         'router_id'          : dst_router_id,
                         'route_distinguisher': route_distinguisher,
                         'sub_interface_index': vlan_id,
                         'vlan_id'            : vlan_id,
                         'address_ip'         : dst_address_ip,
                         'address_prefix'     : 16,
+                        'policy_AZ'          : policy_AZ,
+                        'policy_ZA'          : policy_ZA,
                     }),
             ]
             return json_service_l3nm_planned(
@@ -357,9 +399,15 @@ class RequestGenerator:
             json_endpoint_id(json_device_id(src_device_uuid), src_endpoint_uuid),
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
+
+        availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+        capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+        e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
         constraints = [
-            json_constraint_custom('bandwidth[gbps]', '10.0'),
-            json_constraint_custom('latency[ms]', '20.0'),
+            json_constraint_sla_availability(1, True, availability),
+            json_constraint_sla_capacity(capacity_gbps),
+            json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]),
+            json_constraint_sla_latency(e2e_latency_ms),
         ]

         if request_type == RequestType.SLICE_L2NM:
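
Note: compose_request() now returns a (completed, request) tuple so callers can distinguish "generation finished" from "no resources available right now". A minimal driving loop under the new contract, sketched with an assumed pre-configured load_gen Parameters instance:

    # Sketch of the new calling convention; `parameters` is assumed to be configured.
    generator = RequestGenerator(parameters)
    generator.initialize()
    while True:
        completed, request = generator.compose_request()
        if completed:
            break        # num_requests reached; never True when num_requests == 0
        if request is None:
            continue     # endpoints exhausted for now; a teardown may release some later
        # hand `request` over, e.g. to RequestScheduler._request_setup below
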
diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
index 775da1580a2a6521dbdc8fe32236c1f2adb4b3a7..773a37eac258f8b3c16c966464ced124d3c77c85 100644
--- a/src/load_generator/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import copy, logging, pytz, random
+import copy, logging, pytz, random, threading
 from apscheduler.executors.pool import ThreadPoolExecutor
 from apscheduler.jobstores.memory import MemoryJobStore
 from apscheduler.schedulers.blocking import BlockingScheduler
@@ -21,6 +21,7 @@ from typing import Dict, Optional
 from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
+from .Constants import MAX_WORKER_THREADS
 from .DltTools import explore_entities_to_record, record_entities
 from .Parameters import Parameters
 from .RequestGenerator import RequestGenerator
@@ -37,7 +38,7 @@ class RequestScheduler:
         self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=10)},
+            executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
             job_defaults = {
                 'coalesce': False,
                 'max_instances': 100,
@@ -46,14 +47,18 @@ class RequestScheduler:
             timezone=pytz.utc)
         self._parameters = parameters
         self._generator = generator
+        self._running = threading.Event()
+
+    @property
+    def num_generated(self): return min(self._generator.num_generated, self._parameters.num_requests)
+
+    @property
+    def infinite_loop(self): return self._generator.infinite_loop
+
+    @property
+    def running(self): return self._running.is_set()

     def _schedule_request_setup(self) -> None:
-        infinite_loop = self._parameters.num_requests == 0
-        num_requests_generated = self._generator.num_requests_generated - 1 # because it first increases, then checks
-        if not infinite_loop and (num_requests_generated >= self._parameters.num_requests):
-            LOGGER.info('Generation Done!')
-            #self._scheduler.shutdown()
-            return
         iat = random.expovariate(1.0 / self._parameters.inter_arrival_time)
         run_date = datetime.utcnow() + timedelta(seconds=iat)
         self._scheduler.add_job(
@@ -66,16 +71,24 @@ class RequestScheduler:
             self._request_teardown, args=(request,),
             trigger='date', run_date=run_date, timezone=pytz.utc)

     def start(self):
+        self._running.set()
         self._schedule_request_setup()
         self._scheduler.start()

     def stop(self):
         self._scheduler.shutdown()
+        self._running.clear()

     def _request_setup(self) -> None:
-        self._schedule_request_setup()
+        completed, request = self._generator.compose_request()
+        if completed:
+            LOGGER.info('Generation Done!')
+            #self._scheduler.shutdown()
+            self._running.clear()
+            return
+        else:
+            self._schedule_request_setup()

-        request = self._generator.compose_request()
         if request is None:
             LOGGER.warning('No resources available to compose new request')
             return
diff --git a/src/load_generator/service/Constants.py b/src/load_generator/service/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c339877c70363e874df278d6b5d29cc47a3be0f
--- /dev/null
+++ b/src/load_generator/service/Constants.py
@@ -0,0 +1,27 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.load_generator_pb2 import RequestTypeEnum
+from load_generator.load_gen.Constants import RequestType
+
+REQUEST_TYPE_MAP = {
+    RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM : RequestType.SERVICE_L2NM,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM : RequestType.SERVICE_L3NM,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_MW   : RequestType.SERVICE_MW,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI : RequestType.SERVICE_TAPI,
+    RequestTypeEnum.REQUESTTYPE_SLICE_L2NM   : RequestType.SLICE_L2NM,
+    RequestTypeEnum.REQUESTTYPE_SLICE_L3NM   : RequestType.SLICE_L3NM,
+}
+
+REQUEST_TYPE_REVERSE_MAP = {v:k for k,v in REQUEST_TYPE_MAP.items()}
diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
index c280581ddfab488249ff249e60118ec3030e0447..d66b0b2c10c5228e0c3d15759fc46b2c0770154d 100644
--- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
+++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
@@ -12,43 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Optional
 import grpc, logging
+from typing import Optional
 from apscheduler.schedulers.background import BackgroundScheduler
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, Status
 from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer
-from load_generator.load_gen.Constants import RequestType
-from load_generator.load_gen.Parameters import Parameters
+from load_generator.load_gen.Parameters import Parameters as LoadGen_Parameters
 from load_generator.load_gen.RequestGenerator import RequestGenerator
 from load_generator.load_gen.RequestScheduler import RequestScheduler
+from .Constants import REQUEST_TYPE_MAP, REQUEST_TYPE_REVERSE_MAP

 LOGGER = logging.getLogger(__name__)

 class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
-        self._parameters = Parameters(
-            num_requests  = 100,
-            request_types = [
-                RequestType.SERVICE_L2NM,
-                RequestType.SERVICE_L3NM,
-                #RequestType.SERVICE_MW,
-                #RequestType.SERVICE_TAPI,
-                RequestType.SLICE_L2NM,
-                RequestType.SLICE_L3NM,
-            ],
-            offered_load  = 50,
-            holding_time  = 10,
-            do_teardown   = True,
-            dry_mode      = False,           # in dry mode, no request is sent to TeraFlowSDN
-            record_to_dlt = False,           # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT
-            dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False
-        )
         self._generator : Optional[RequestGenerator] = None
         self._scheduler : Optional[RequestScheduler] = None
         LOGGER.debug('Servicer Created')

-    def Start(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+    def Start(self, request : Parameters, context : grpc.ServicerContext) -> Empty:
+        self._parameters = LoadGen_Parameters(
+            num_requests       = request.num_requests,
+            request_types      = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
+            offered_load       = request.offered_load if request.offered_load > 1.e-12 else None,
+            holding_time       = request.holding_time if request.holding_time > 1.e-12 else None,
+            inter_arrival_time = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
+            do_teardown        = request.do_teardown,   # if set, schedule tear down of requests
+            dry_mode           = request.dry_mode,      # in dry mode, no request is sent to TeraFlowSDN
+            record_to_dlt      = request.record_to_dlt, # if set, upload changes to DLT
+            dlt_domain_id      = request.dlt_domain_id, # domain used to upload entities (when record_to_dlt = True)
+        )
+
         LOGGER.info('Initializing Generator...')
         self._generator = RequestGenerator(self._parameters)
         self._generator.initialize()
@@ -58,6 +54,33 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
         self._scheduler.start()
         return Empty()

+    def GetStatus(self, request : Empty, context : grpc.ServicerContext) -> Status:
+        if self._scheduler is None:
+            # not started
+            status = Status()
+            status.num_generated = 0
+            status.infinite_loop = False
+            status.running = False
+            return status
+
+        params = self._scheduler._parameters
+        request_types = [REQUEST_TYPE_REVERSE_MAP[rt] for rt in params.request_types]
+
+        status = Status()
+        status.num_generated = self._scheduler.num_generated
+        status.infinite_loop = self._scheduler.infinite_loop
+        status.running = self._scheduler.running
+        status.parameters.num_requests       = params.num_requests       # pylint: disable=no-member
+        status.parameters.offered_load       = params.offered_load       # pylint: disable=no-member
+        status.parameters.holding_time       = params.holding_time       # pylint: disable=no-member
+        status.parameters.inter_arrival_time = params.inter_arrival_time # pylint: disable=no-member
+        status.parameters.do_teardown        = params.do_teardown        # pylint: disable=no-member
+        status.parameters.dry_mode           = params.dry_mode           # pylint: disable=no-member
+        status.parameters.record_to_dlt      = params.record_to_dlt      # pylint: disable=no-member
+        status.parameters.dlt_domain_id      = params.dlt_domain_id      # pylint: disable=no-member
+        status.parameters.request_types.extend(request_types)            # pylint: disable=no-member
+        return status
+
     def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty:
         if self._scheduler is not None:
             self._scheduler.stop()
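
Note: with Start() now taking Parameters and the new GetStatus() RPC, a full client-side interaction might look as follows. This is a sketch based only on the fields visible in the diffs above; the concrete values are illustrative, not defaults:

    from common.proto.context_pb2 import Empty
    from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
    from load_generator.client.LoadGeneratorClient import LoadGeneratorClient

    client = LoadGeneratorClient()
    params = Parameters()
    params.num_requests = 100              # 0 requests an infinite loop
    params.request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM)
    params.offered_load = 50.0             # values <= 1e-12 are treated as unset by Start()
    params.holding_time = 10.0
    params.do_teardown = True
    client.Start(params)
    status = client.GetStatus(Empty())     # Status reports num_generated / infinite_loop / running
    client.Stop(Empty())
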
diff --git a/src/monitoring/.gitlab-ci.yml b/src/monitoring/.gitlab-ci.yml
index ff620c53425f8f447dcb00ea03bc4c9f8ce4c5e9..7c3a14975d9c7bf7d5d46be917203338bea7f1f9 100644
--- a/src/monitoring/.gitlab-ci.yml
+++ b/src/monitoring/.gitlab-ci.yml
@@ -56,7 +56,7 @@ unit_test monitoring:
     - docker pull questdb/questdb
    - docker run --name questdb -d -p 9000:9000 -p 9009:9009 -p 8812:8812 -p 9003:9003 -e QDB_CAIRO_COMMIT_LAG=1000 -e QDB_CAIRO_MAX_UNCOMMITTED_ROWS=100000 --network=teraflowbridge --rm questdb/questdb
     - sleep 10
-    - docker run --name $IMAGE_NAME -d -p 7070:7070 --env METRICSDB_HOSTNAME=questdb --env METRICSDB_ILP_PORT=9009 --env METRICSDB_REST_PORT=9000 --env METRICSDB_TABLE=monitoring -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker run --name $IMAGE_NAME -d -p 7070:7070 --env METRICSDB_HOSTNAME=questdb --env METRICSDB_ILP_PORT=9009 --env METRICSDB_REST_PORT=9000 --env METRICSDB_TABLE_MONITORING_KPIS=tfs_monitoring_kpis -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
     - sleep 30
     - docker ps -a
     - docker logs $IMAGE_NAME
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index 0bbce15094b87a17e332aad21bf34a565e8dd087..f408734df40c1bc5c16b7e108e3ce5a211165f71 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -47,7 +47,7 @@ MONITORING_INCLUDEKPI_COUNTER = Counter('monitoring_includekpi_counter', 'Monito
 METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME")
 METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT")
 METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT")
-METRICSDB_TABLE = os.environ.get("METRICSDB_TABLE")
+METRICSDB_TABLE_MONITORING_KPIS = os.environ.get("METRICSDB_TABLE_MONITORING_KPIS")

 class MonitoringServiceServicerImpl(MonitoringServiceServicer):
     def __init__(self, name_mapping : NameMapping):
@@ -57,7 +57,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         self.management_db = ManagementDBTools.ManagementDB('monitoring.db')
         self.deviceClient = DeviceClient()
         self.metrics_db = MetricsDBTools.MetricsDB(
-            METRICSDB_HOSTNAME, name_mapping, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
+            METRICSDB_HOSTNAME, name_mapping, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE_MONITORING_KPIS)
         self.subs_manager = SubscriptionManager(self.metrics_db)
         self.alarm_manager = AlarmManager(self.metrics_db)
         LOGGER.info('MetricsDB initialized')
@@ -592,8 +592,8 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
             LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
             response.kpi_id.kpi_id.uuid = "NoID"
         else:
-            query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE} WHERE kpi_id = '{kpi_id}' " \
-                    f"LATEST ON timestamp PARTITION BY kpi_id"
+            query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE_MONITORING_KPIS} " \
+                    f"WHERE kpi_id = '{kpi_id}' LATEST ON timestamp PARTITION BY kpi_id"
             data = self.metrics_db.run_query(query)
             LOGGER.debug(data)
             if len(data) == 0:
diff --git a/src/monitoring/service/__main__.py b/src/monitoring/service/__main__.py
index fc460151b370c0eb5335787ed4677f7008881ad2..14f5609602c90eb9f54462e423af100997cf00d2 100644
--- a/src/monitoring/service/__main__.py
+++ b/src/monitoring/service/__main__.py
@@ -69,6 +69,8 @@ def main():
     wait_for_environment_variables([
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])

     signal.signal(signal.SIGINT, signal_handler)
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index 1428b0ed56dbb24a24af8fde42e4d073a48c931d..c883f9d141fc28645761641b0ccd10294b538bd2 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -75,7 +75,7 @@ os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_POR
 METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME')
 METRICSDB_ILP_PORT = os.environ.get('METRICSDB_ILP_PORT')
 METRICSDB_REST_PORT = os.environ.get('METRICSDB_REST_PORT')
-METRICSDB_TABLE = os.environ.get('METRICSDB_TABLE')
+METRICSDB_TABLE_MONITORING_KPIS = os.environ.get('METRICSDB_TABLE_MONITORING_KPIS')

 LOGGER = logging.getLogger(__name__)

@@ -193,7 +193,7 @@ def management_db():
 def metrics_db(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name
     return monitoring_service.monitoring_servicer.metrics_db
     #_metrics_db = MetricsDBTools.MetricsDB(
-    #    METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
+    #    METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE_MONITORING_KPIS)
     #return _metrics_db

 @pytest.fixture(scope='session')
diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
index a9fc4fa3d499f634f021d9ebbb4a749b4f8715c7..a6d39ee36949e075323613fceb71da5c77354fe5 100644
--- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
@@ -54,13 +54,15 @@ class KDisjointPathAlgorithm(_Algorithm):
             self.services_details.setdefault(service_key, service_details)

         for constraint in service.service_constraints:
-            if constraint.WhichOneof('constraint') == 'custom':
+            kind = constraint.WhichOneof('constraint')
+
+            if kind == 'custom':
                 constraint_type = constraint.custom.constraint_type
                 if constraint_type not in CUSTOM_CONSTRAINTS: continue
                 constraint_value = constraint.custom.constraint_value
                 constraints[constraint_type] = constraint_value

-            if constraint.WhichOneof('constraint') == 'endpoint_location':
+            elif kind == 'endpoint_location':
                 endpoint_id = constraint.endpoint_location.endpoint_id
                 device_uuid = endpoint_id.device_id.device_uuid.uuid
                 device_uuid = self.device_name_mapping.get(device_uuid, device_uuid)
@@ -73,7 +75,7 @@ class KDisjointPathAlgorithm(_Algorithm):
                 site_id = constraint.endpoint_location.location.region
                 endpoints.setdefault((device_uuid, endpoint_uuid), dict())['site_id'] = site_id

-            if constraint.WhichOneof('constraint') == 'endpoint_priority':
+            elif kind == 'endpoint_priority':
                 endpoint_id = constraint.endpoint_priority.endpoint_id
                 device_uuid = endpoint_id.device_id.device_uuid.uuid
                 device_uuid = self.device_name_mapping.get(device_uuid, device_uuid)
@@ -82,9 +84,18 @@ class KDisjointPathAlgorithm(_Algorithm):
                 priority = constraint.endpoint_priority.priority
                 endpoints.setdefault((device_uuid, endpoint_uuid), dict())['priority'] = priority

+            elif kind == 'sla_capacity':
+                capacity_gbps = constraint.sla_capacity.capacity_gbps
+                constraints['bandwidth[gbps]'] = str(capacity_gbps)
+
+            elif kind == 'sla_latency':
+                e2e_latency_ms = constraint.sla_latency.e2e_latency_ms
+                constraints['latency[ms]'] = str(e2e_latency_ms)
+
         # TODO: ensure these constraints are provided in the request
         if 'bandwidth[gbps]' not in constraints: constraints['bandwidth[gbps]'] = '20.0'
         if 'latency[ms]' not in constraints: constraints['latency[ms]'] = '20.0'
+        #if 'jitter[us]' not in constraints: constraints['jitter[us]'] = '50.0'

     def get_link_from_endpoint(self, endpoint : Dict) -> Tuple[Dict, Link]:
         device_uuid = endpoint['device_id']
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index bfb4da05fb57bef03fb94fc8973271ceb45f619a..ee85f0bb083500c655e78798bbcd2bd00e8a4501 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -73,17 +73,22 @@ def compose_latency_characteristics(fixed_latency_characteristic : str) -> Dict:
     return {'fixed-latency-characteristic': fixed_latency_characteristic}

 def compose_constraint(constraint : Constraint) -> Dict:
-    if constraint.WhichOneof('constraint') != 'custom':
-        str_constraint = grpc_message_to_json_string(constraint)
-        LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
-        return None
-    constraint_type = constraint.custom.constraint_type
-    if constraint_type in {'diversity'}:
-        str_constraint = grpc_message_to_json_string(constraint)
-        LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
-        return None
-    constraint_value = constraint.custom.constraint_value
-    return {'constraint_type': constraint_type, 'constraint_value': constraint_value}
+    kind = constraint.WhichOneof('constraint')
+    if kind == 'custom':
+        constraint_type = constraint.custom.constraint_type
+        if constraint_type in {'bandwidth[gbps]', 'latency[ms]', 'jitter[us]'}:
+            constraint_value = constraint.custom.constraint_value
+            return {'constraint_type': constraint_type, 'constraint_value': constraint_value}
+    elif kind == 'sla_capacity':
+        capacity_gbps = constraint.sla_capacity.capacity_gbps
+        return {'constraint_type': 'bandwidth[gbps]', 'constraint_value': str(capacity_gbps)}
+    elif kind == 'sla_latency':
+        e2e_latency_ms = constraint.sla_latency.e2e_latency_ms
+        return {'constraint_type': 'latency[ms]', 'constraint_value': str(e2e_latency_ms)}
+
+    str_constraint = grpc_message_to_json_string(constraint)
+    LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
+    return None

 def compose_device(grpc_device : Device) -> Dict:
     device_uuid = grpc_device.device_id.device_uuid.uuid
@@ -144,6 +149,8 @@ def compose_service(grpc_service : Service) -> Dict:
         constraints.append({'constraint_type': 'bandwidth[gbps]', 'constraint_value': '20.0'})
     if 'latency[ms]' not in constraint_types:
         constraints.append({'constraint_type': 'latency[ms]', 'constraint_value': '20.0'})
+    #if 'jitter[us]' not in constraint_types:
+    #    constraints.append({'constraint_type': 'jitter[us]', 'constraint_value': '50.0'})

     return {
         'serviceId': service_id,
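
Note: the net effect of the compose_constraint() rework is that the new SLA oneofs are translated back into the legacy string-typed constraints the pathcomp backend understands, and everything else is logged and dropped. A hypothetical helper mirroring the two new branches:

    def sla_to_legacy_constraint(kind: str, value: float) -> dict:
        # Mirrors the sla_capacity / sla_latency branches of compose_constraint() above.
        if kind == 'sla_capacity':
            return {'constraint_type': 'bandwidth[gbps]', 'constraint_value': str(value)}
        if kind == 'sla_latency':
            return {'constraint_type': 'latency[ms]', 'constraint_value': str(value)}
        return None  # anything else is logged as unsupported and dropped

    assert sla_to_legacy_constraint('sla_capacity', 10.0) == \
        {'constraint_type': 'bandwidth[gbps]', 'constraint_value': '10.0'}
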
diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py
index ca9764a34ef0550351c4a0ebcdbd041805c49dde..f26d74ce4c665663735bae69dcfb5a4e14311bfa 100644
--- a/src/pathcomp/frontend/tests/Objects_A_B_C.py
+++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py
@@ -13,7 +13,7 @@
 # limitations under the License.

 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id
 from common.tools.object_factory.EndPoint import json_endpoints
@@ -97,8 +97,8 @@ LINK_C2_C3_ID, LINK_C2_C3 = compose_link(DEVICE_C2_ENDPOINTS[1], DEVICE_C3_ENDPO

 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_A1_B1 = compose_service(DEVICE_A1_ENDPOINTS[2], DEVICE_B1_ENDPOINTS[2], constraints=[
-    json_constraint_custom('bandwidth[gbps]', 10.0),
-    json_constraint_custom('latency[ms]', 12.0),
+    json_constraint_sla_capacity(10.0),
+    json_constraint_sla_latency(12.0),
 ])

 # ----- Containers -----------------------------------------------------------------------------------------------------
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
index 1d057c10edcea30e1bf38f63d8a1ad0c6a0a4d46..9ee784e1f76026416bca9824aa8e54e2c4f874f2 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
@@ -13,7 +13,7 @@
 # limitations under the License.

 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -139,8 +139,8 @@ LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4

 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
-    json_constraint_custom('bandwidth[gbps]', 10.0),
-    json_constraint_custom('latency[ms]', 20.0),
+    json_constraint_sla_capacity(10.0),
+    json_constraint_sla_latency(20.0),
 ])

 # ----- Containers -----------------------------------------------------------------------------------------------------
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
index 8f6e88719f4019edbeea36c7b4a641fbd7abbea4..71510d088746bd791e4671686dd5114874dd5a2a 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
@@ -14,7 +14,7 @@
 import uuid
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -149,8 +149,8 @@ LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3

 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
-    json_constraint_custom('bandwidth[gbps]', 10.0),
-    json_constraint_custom('latency[ms]', 20.0),
+    json_constraint_sla_capacity(10.0),
+    json_constraint_sla_latency(20.0),
 ])

 # ----- Containers -----------------------------------------------------------------------------------------------------
diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py
index fd14c8a7aed4ec6e1a1c73aaa9425008abe7db60..8088259b80b8ade2669568b74f004dcfa631dd9c 100644
--- a/src/pathcomp/frontend/tests/test_unitary.py
+++ b/src/pathcomp/frontend/tests/test_unitary.py
@@ -18,12 +18,11 @@ from common.proto.pathcomp_pb2 import PathCompRequest
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Constraint import (
     json_constraint_custom, json_constraint_endpoint_location_region, json_constraint_endpoint_priority,
-    json_constraint_sla_availability)
+    json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_latency)
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Service import json_service_l3nm_planned
 from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient

 # Scenarios:
@@ -90,8 +89,8 @@ def test_request_service_shortestpath(
     request_services = copy.deepcopy(SERVICES)
     #request_services[0]['service_constraints'] = [
-    #    json_constraint_custom('bandwidth[gbps]', 1000.0),
-    #    json_constraint_custom('latency[ms]', 1200.0),
+    #    json_constraint_sla_capacity(1000.0),
+    #    json_constraint_sla_latency(1200.0),
     #]
     pathcomp_request = PathCompRequest(services=request_services)
     pathcomp_request.shortest_path.Clear() # hack to select the shortest path algorithm that has no attributes
@@ -202,9 +201,9 @@ def test_request_service_kdisjointpath(
     ]

     endpoint_ids, constraints = [], [
-        json_constraint_custom('bandwidth[gbps]', 10.0),
-        json_constraint_custom('latency[ms]', 12.0),
-        json_constraint_sla_availability(2, True),
+        json_constraint_sla_capacity(10.0),
+        json_constraint_sla_latency(12.0),
+        json_constraint_sla_availability(2, True, 50.0),
         json_constraint_custom('diversity', {'end-to-end-diverse': 'all-other-accesses'}),
     ]
diff --git a/src/pathcomp/misc/example-results-kdisjointpaths.json b/src/pathcomp/misc/example-results-kdisjointpaths.json
index 9eda25d484e45db53471ea3f655d511cbcc42c18..c1dbf3a3c7bc6335f0d0c765b6622ce070b7774e 100644
--- a/src/pathcomp/misc/example-results-kdisjointpaths.json
+++ b/src/pathcomp/misc/example-results-kdisjointpaths.json
@@ -64,8 +64,8 @@
             ],
             "service_status": {"service_status": "SERVICESTATUS_PLANNED"},
             "service_constraints": [
-                {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}},
-                {"custom": {"constraint_type": "latency[ms]", "constraint_value": "12.0"}}
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 12.0}}
             ],
             "service_config": {"config_rules": []}
         }
diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml
index 02cdf3ca8d00d3ea10862488d54177aa163056b8..40516e5cc3fdd1fb993a1248ad36ea7551edfc40 100644
--- a/src/policy/target/kubernetes/kubernetes.yml
+++ b/src/policy/target/kubernetes/kubernetes.yml
@@ -1,4 +1,4 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
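
Note: the test objects all migrate from the stringly-typed json_constraint_custom to the SLA constraint factories. Judging from the updated example-results-kdisjointpaths.json above, the JSON shapes they emit are roughly the following (inferred from the example, not verified against the factory code):

    # Inferred from the example JSON above; exact field sets may differ.
    assert json_constraint_sla_capacity(10.0) == {'sla_capacity': {'capacity_gbps': 10.0}}
    assert json_constraint_sla_latency(12.0) == {'sla_latency': {'e2e_latency_ms': 12.0}}
    # json_constraint_sla_availability(2, True, 50.0) presumably carries
    # num_disjoint_paths=2, all_active=True and an availability figure of 50.0.
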
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 622abeee860cdb6ce8153b7def9fb91ea1117277..0b2e0760161c109a2ba6a5feecc931e8bcf5c14f 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -38,8 +38,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):

     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def CreateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
-        LOGGER.info('[CreateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
-
         if len(request.service_endpoint_ids) > 0:
             unexpected_endpoints = []
             for service_endpoint_id in request.service_endpoint_ids:
@@ -85,8 +83,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):

     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def UpdateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
-        LOGGER.info('[UpdateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
-
         # Set service status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the service is
         # being modified.
         context_client = ContextClient()
@@ -112,27 +108,30 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
         service_id_with_uuids = context_client.SetService(service)
         service_with_uuids = context_client.GetService(service_id_with_uuids)

-        num_disjoint_paths = None
+        num_disjoint_paths = 0
         for constraint in request.service_constraints:
             if constraint.WhichOneof('constraint') == 'sla_availability':
                 num_disjoint_paths = constraint.sla_availability.num_disjoint_paths
                 break

+        num_disjoint_paths = 1 if num_disjoint_paths is None or num_disjoint_paths == 0 else num_disjoint_paths
+        num_expected_endpoints = num_disjoint_paths * 2
+
         tasks_scheduler = TasksScheduler(self.service_handler_factory)
-        if len(service_with_uuids.service_endpoint_ids) >= (2 if num_disjoint_paths is None else 4):
+        if len(service_with_uuids.service_endpoint_ids) >= num_expected_endpoints:
             pathcomp_request = PathCompRequest()
             pathcomp_request.services.append(service_with_uuids) # pylint: disable=no-member

-            if num_disjoint_paths is None:
+            if num_disjoint_paths is None or num_disjoint_paths in {0, 1}:
                 pathcomp_request.shortest_path.Clear() # pylint: disable=no-member
             else:
                 pathcomp_request.k_disjoint_path.num_disjoint = num_disjoint_paths # pylint: disable=no-member

-            LOGGER.info('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request)))
+            LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request)))
             pathcomp = PathCompClient()
             pathcomp_reply = pathcomp.Compute(pathcomp_request)
             pathcomp.close()
-            LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply)))
+            LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply)))

             # Feed TaskScheduler with this path computation reply. TaskScheduler identifies inter-dependencies among
             # the services and connections retrieved and produces a schedule of tasks (an ordered list of tasks to be
@@ -144,8 +143,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):

     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
-        LOGGER.info('[DeleteService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
-
         context_client = ContextClient()

         # Set service status to "SERVICESTATUS_PENDING_REMOVAL" to ensure rest of components are aware the service is
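
Note: UpdateService now normalizes num_disjoint_paths before deciding whether enough endpoints were provided; the rule collapses to "two endpoints per path, with the shortest-path case counting as one path". As a sketch of the check encoded above:

    # Sketch of the endpoint-count rule in UpdateService above.
    def expected_endpoints(num_disjoint_paths) -> int:
        if num_disjoint_paths is None or num_disjoint_paths == 0:
            num_disjoint_paths = 1        # plain shortest-path computation
        return num_disjoint_paths * 2     # one (source, destination) pair per disjoint path

    assert expected_endpoints(None) == 2  # shortest path
    assert expected_endpoints(2) == 4     # k-disjoint paths with k=2
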
'L2VSI'}), json_config_rule_set( - '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), - {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}), + '/interface[{:s}]/subinterface[0]'.format(if_cirid_name), + {'name': if_cirid_name, + 'type': 'l2vlan', + 'index': sub_interface_index, + 'vlan_id': vlan_id}), json_config_rule_set( '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, - 'subinterface': sub_interface_index}), + {'name': network_instance_name, + 'id': if_cirid_name, + 'interface': if_cirid_name, + 'subinterface': 0 + }), json_config_rule_set( '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), - {'name': network_instance_name, 'connection_point': connection_point_id, 'VC_ID': circuit_id, - 'remote_system': remote_router}), + {'name': network_instance_name, + 'connection_point': connection_point_id, + 'VC_ID': circuit_id, + 'remote_system': remote_router + }), ] return json_config_rules @@ -88,9 +94,9 @@ def teardown_config_rules( #bgp_as = json_settings.get('bgp_as', 0 ) # 65000 #bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 - router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' - sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + #sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 #vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 @@ -102,33 +108,16 @@ def teardown_config_rules( connection_point_id = 'VC-1' json_config_rules = [ - json_config_rule_delete( - '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), - {'name': network_instance_name, 'connection_point': connection_point_id}), + - json_config_rule_delete( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, - 'subinterface': sub_interface_index}), - - json_config_rule_delete( - '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), - {'name': if_cirid_name, 'index': sub_interface_index}), json_config_rule_delete( '/network_instance[{:s}]'.format(network_instance_name), {'name': network_instance_name}), - - json_config_rule_delete( - '/network_instance[default]/protocols[STATIC]', - {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}), - - json_config_rule_delete( - '/network_instance[default]/protocols[OSPF]', - {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}), - + json_config_rule_delete( - '/network_instance[default]', - {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}), + '/interface[{:s}]'.format(if_cirid_name), { + 'name': if_cirid_name, + }), ] return json_config_rules diff --git a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py index 
6eb139dbd03b4dff781a08548c03627512501ab5..d511c8947ecb43052fd154ab3ce3293a468b4263 100644 --- a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py +++ b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py @@ -75,10 +75,12 @@ class L2NMOpenConfigServiceHandler(_ServiceHandler): service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, settings, endpoint_settings) - del device_obj.device_config.config_rules[:] - for json_config_rule in json_config_rules: - device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) - self.__task_executor.configure_device(device_obj) + if len(json_config_rules) > 0: + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) @@ -110,10 +112,12 @@ class L2NMOpenConfigServiceHandler(_ServiceHandler): service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, settings, endpoint_settings) - del device_obj.device_config.config_rules[:] - for json_config_rule in json_config_rules: - device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) - self.__task_executor.configure_device(device_obj) + if len(json_config_rules) > 0: + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint))) diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py index 903ad8cd5ae442a03d54fb49083f3837a3c8187c..351efe5a5f32db99c36846ad2fd96e2c8567148e 100644 --- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py @@ -21,120 +21,162 @@ def setup_config_rules( service_settings : TreeNode, endpoint_settings : TreeNode ) -> List[Dict]: - json_settings : Dict = {} if service_settings is None else service_settings.value - json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value + if service_settings is None: return [] + if endpoint_settings is None: return [] + + json_settings : Dict = service_settings.value + json_endpoint_settings : Dict = endpoint_settings.value service_short_uuid = service_uuid.split('-')[-1] network_instance_name = '{:s}-NetInst'.format(service_short_uuid) network_interface_desc = '{:s}-NetIf'.format(service_uuid) network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) - mtu = json_settings.get('mtu', 1450 ) # 1512 - #address_families = json_settings.get('address_families', [] ) # ['IPV4'] - bgp_as = json_settings.get('bgp_as', 0 ) # 65000 - bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 - - #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' - route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # 
diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
index 903ad8cd5ae442a03d54fb49083f3837a3c8187c..351efe5a5f32db99c36846ad2fd96e2c8567148e 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
@@ -21,120 +21,162 @@ def setup_config_rules(
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
-    json_settings          : Dict = {} if service_settings  is None else service_settings.value
-    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
+    if service_settings  is None: return []
+    if endpoint_settings is None: return []
+
+    json_settings          : Dict = service_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
 
     service_short_uuid        = service_uuid.split('-')[-1]
     network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
     network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
     network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
 
-    mtu                 = json_settings.get('mtu',              1450 )  # 1512
-    #address_families   = json_settings.get('address_families', []   )  # ['IPV4']
-    bgp_as              = json_settings.get('bgp_as',           0    )  # 65000
-    bgp_route_target    = json_settings.get('bgp_route_target', '0:0')  # 65000:333
-
-    #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
-    route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'     )  # '60001:801'
+    mtu                 = json_settings.get('mtu',                 1450 )  # 1512
+    #address_families   = json_settings.get('address_families',    []   )  # ['IPV4']
+    bgp_as              = json_settings.get('bgp_as',              65000)  # 65000
+    route_distinguisher = json_settings.get('route_distinguisher', '0:0')  # '60001:801'
+
     sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0         )  # 1
+    router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
     vlan_id             = json_endpoint_settings.get('vlan_id',             1         )  # 400
     address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
     address_prefix      = json_endpoint_settings.get('address_prefix',      24        )  # 30
+    policy_import       = json_endpoint_settings.get('policy_AZ',           '2'       )  # 2
+    policy_export       = json_endpoint_settings.get('policy_ZA',           '7'       )  # 30
+
     if_subif_name       = '{:s}.{:d}'.format(endpoint_name, vlan_id)
 
     json_config_rules = [
+        # Configure Interface (not used)
+        #json_config_rule_set(
+        #    '/interface[{:s}]'.format(endpoint_name), {
+        #        'name': endpoint_name, 'description': network_interface_desc, 'mtu': mtu,
+        #}),
+
+        #Create network instance
         json_config_rule_set(
             '/network_instance[{:s}]'.format(network_instance_name), {
-                'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF',
+                'name': network_instance_name,
+                'description': network_interface_desc,
+                'type': 'L3VRF',
                 'route_distinguisher': route_distinguisher,
-                #'router_id': router_id, 'address_families': address_families,
-            }),
-        json_config_rule_set(
-            '/interface[{:s}]'.format(endpoint_name), {
-                'name': endpoint_name, 'description': network_interface_desc, 'mtu': mtu,
-            }),
-        json_config_rule_set(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), {
-                'name': endpoint_name, 'index': sub_interface_index,
-                'description': network_subinterface_desc, 'vlan_id': vlan_id,
-                'address_ip': address_ip, 'address_prefix': address_prefix,
-            }),
-        json_config_rule_set(
-            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_name,
-                'subinterface': sub_interface_index,
+                #'router_id': router_id,
+                #'address_families': address_families,
             }),
+
+        #Add BGP protocol to network instance
         json_config_rule_set(
             '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
-                'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as,
+                'name': network_instance_name,
+                'protocol_name': 'BGP',
+                'identifier': 'BGP',
+                'as': bgp_as,
+                'router_id': router_id,
             }),
+
+        #Add DIRECTLY CONNECTED protocol to network instance
         json_config_rule_set(
-            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
-                'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
+            '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(network_instance_name), {
+                'name': network_instance_name,
+                'identifier': 'DIRECTLY_CONNECTED',
+                'protocol_name': 'DIRECTLY_CONNECTED',
             }),
+
+
+        #Add STATIC protocol to network instance
         json_config_rule_set(
-            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
-                network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
-                'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
+            '/network_instance[{:s}]/protocols[STATIC]'.format(network_instance_name), {
+                'name': network_instance_name,
+                'identifier': 'STATIC',
+                'protocol_name': 'STATIC',
             }),
+
+        #Create interface with subinterface
         json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_subif_name, sub_interface_index), {
+                'name'          : if_subif_name,
+                'type'          :'l3ipvlan',
+                'mtu'           : mtu,
+                'index'         : sub_interface_index,
+                'description'   : network_subinterface_desc,
+                'vlan_id'       : vlan_id,
+                'address_ip'    : address_ip,
+                'address_prefix': address_prefix,
             }),
+
+        #Associate interface to network instance
         json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
-            }),
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
+                'name'        : network_instance_name,
+                'id'          : if_subif_name,
+                'interface'   : if_subif_name,
+                'subinterface': sub_interface_index,
+            }),
+
+        #Create routing policy
         json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
-                'policy_name': '{:s}_import'.format(network_instance_name),
+            '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import,route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_import),
+                'ext_community_member'  : route_distinguisher,
             }),
         json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
+            # pylint: disable=duplicate-string-formatting-argument
+            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(policy_import, policy_import), {
+                'policy_name'           : policy_import,
+                'statement_name'        : 'stm_{:s}'.format(policy_import),
+                'ext_community_set_name': 'set_{:s}'.format(policy_import),
+                'policy_result'         : 'ACCEPT_ROUTE',
             }),
+
+        #Associate routing policy to network instance
         json_config_rule_set(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name),
+            '/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_import), {
+                'name'         : network_instance_name,
+                'import_policy': policy_import,
             }),
+
+        #Create routing policy
         json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+            '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_export),
+                'ext_community_member'  : route_distinguisher,
             }),
         json_config_rule_set(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
+            # pylint: disable=duplicate-string-formatting-argument
+            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(policy_export, policy_export), {
+                'policy_name'           : policy_export,
+                'statement_name'        : 'stm_{:s}'.format(policy_export),
+                'ext_community_set_name': 'set_{:s}'.format(policy_export),
+                'policy_result'         : 'ACCEPT_ROUTE',
             }),
+
+        #Associate routing policy to network instance
         json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
-                'policy_name': '{:s}_export'.format(network_instance_name),
+            '/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_export),{
+                'name'         : network_instance_name,
+                'export_policy': policy_export,
             }),
+
+        #Create table connections
         json_config_rule_set(
-            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
+            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(network_instance_name), {
+                'name'                 : network_instance_name,
+                'src_protocol'         : 'DIRECTLY_CONNECTED',
+                'dst_protocol'         : 'BGP',
+                'address_family'       : 'IPV4',
+                'default_import_policy': 'ACCEPT_ROUTE',
             }),
+
         json_config_rule_set(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name),
+            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
+                'name'                 : network_instance_name,
+                'src_protocol'         : 'STATIC',
+                'dst_protocol'         : 'BGP',
+                'address_family'       : 'IPV4',
+                'default_import_policy': 'ACCEPT_ROUTE',
            }),
-    ]
+    ]
     return json_config_rules
 
 def teardown_config_rules(
@@ -142,108 +184,86 @@
     service_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
+    if service_settings  is None: return []
+    if endpoint_settings is None: return []
+
     json_settings          : Dict = {} if service_settings  is None else service_settings.value
     json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
 
-    #mtu              = json_settings.get('mtu',              1450 )  # 1512
-    #address_families = json_settings.get('address_families', []   )  # ['IPV4']
-    #bgp_as           = json_settings.get('bgp_as',           0    )  # 65000
-    bgp_route_target  = json_settings.get('bgp_route_target', '0:0')  # 65000:333
+    service_short_uuid        = service_uuid.split('-')[-1]
+    network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
+    #network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
+    #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
 
+    #mtu                 = json_settings.get('mtu',                 1450 )  # 1512
+    #address_families    = json_settings.get('address_families',    []   )  # ['IPV4']
+    #bgp_as              = json_settings.get('bgp_as',              65000)  # 65000
+    route_distinguisher = json_settings.get('route_distinguisher', '0:0' )  # '60001:801'
+
+    #sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0         )  # 1
     #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
-    #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'     )  # '60001:801'
-    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0         )  # 1
     vlan_id             = json_endpoint_settings.get('vlan_id',             1         )  # 400
     #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
     #address_prefix      = json_endpoint_settings.get('address_prefix',      24        )  # 30
+    policy_import       = json_endpoint_settings.get('policy_AZ',           '2'       )  # 2
+    policy_export       = json_endpoint_settings.get('policy_ZA',           '7'       )  # 30
 
     if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id)
-    service_short_uuid        = service_uuid.split('-')[-1]
-    network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
-    #network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
-    #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
 
     json_config_rules = [
+        #Delete table connections
         json_config_rule_delete(
-            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
-                'name': network_instance_name, 'id': if_subif_name,
-            }),
-        json_config_rule_delete(
-            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), {
-                'name': endpoint_name, 'index': sub_interface_index,
-            }),
-        json_config_rule_delete(
-            '/interface[{:s}]'.format(endpoint_name), {
-                'name': endpoint_name,
-            }),
-        json_config_rule_delete(
-            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
-                network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
-                'address_family': 'IPV4',
+            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(network_instance_name),{
+                'name'          : network_instance_name,
+                'src_protocol'  : 'DIRECTLY_CONNECTED',
+                'dst_protocol'  : 'BGP',
+                'address_family': 'IPV4',
             }),
+
+
         json_config_rule_delete(
             '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
-                'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
+                'name'          : network_instance_name,
+                'src_protocol'  : 'STATIC',
+                'dst_protocol'  : 'BGP',
                 'address_family': 'IPV4',
             }),
+
+        #Delete export routing policy
+
         json_config_rule_delete(
-            '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
-                'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP',
-            }),
-        json_config_rule_delete(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name,
+            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
+                'policy_name': '{:s}_export'.format(network_instance_name),
             }),
         json_config_rule_delete(
-            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
+            '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_export),
             }),
+
+        #Delete import routing policy
+
         json_config_rule_delete(
             '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
                 'policy_name': '{:s}_import'.format(network_instance_name),
             }),
         json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
-            }),
-        json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
-            }),
-        json_config_rule_delete(
-            # pylint: disable=duplicate-string-formatting-argument
-            '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
-                network_instance_name, network_instance_name), {
-                'name': network_instance_name,
-            }),
-        json_config_rule_delete(
-            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
-                network_instance_name, '3'), {
-                'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
+            '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import, route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_import),
             }),
-        json_config_rule_delete(
-            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
-                'policy_name': '{:s}_export'.format(network_instance_name),
-            }),
-        json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
-                network_instance_name, bgp_route_target), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
-                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
-            }),
-        json_config_rule_delete(
-            '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
-                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
+
+        #Delete interface; automatically deletes:
+        # - /interface[]/subinterface[]
+        json_config_rule_delete('/interface[{:s}]'.format(if_subif_name),
+        {
+            'name': if_subif_name,
         }),
-        json_config_rule_delete(
-            '/network_instance[{:s}]'.format(network_instance_name), {
-                'name': network_instance_name
+
+        #Delete network instance; automatically deletes:
+        # - /network_instance[]/interface[]
+        # - /network_instance[]/protocols[]
+        # - /network_instance[]/inter_instance_policies[]
+        json_config_rule_delete('/network_instance[{:s}]'.format(network_instance_name),
+        {
+            'name': network_instance_name
        }),
     ]
     return json_config_rules
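The rewritten L3NM composer keys routing-policy resources on the per-endpoint policy names (settings policy_AZ/policy_ZA) and the route distinguisher, rather than deriving every name from the network instance. A standalone sketch of the resulting resource keys, with illustrative setting values:

policy_import       = '2'           # json_endpoint_settings['policy_AZ']
policy_export       = '7'           # json_endpoint_settings['policy_ZA']
route_distinguisher = '60001:801'

# Same format strings as used by setup_config_rules above:
print('/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import, route_distinguisher))
print('/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(policy_import, policy_import))
print('/network_instance[{:s}]/inter_instance_policies[{:s}]'.format('abcd1234-NetInst', policy_import))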
diff --git a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
index e3af6302dc996bb1582f2c339b3296800aa9d655..b2639ddad58e4c453f1b1e2dc87fce8861ad79a2 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
@@ -75,10 +75,12 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler):
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
                     settings, endpoint_settings)
 
-                del device_obj.device_config.config_rules[:]
-                for json_config_rule in json_config_rules:
-                    device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-                self.__task_executor.configure_device(device_obj)
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
@@ -110,10 +112,12 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler):
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
                     settings, endpoint_settings)
 
-                del device_obj.device_config.config_rules[:]
-                for json_config_rule in json_config_rules:
-                    device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
-                self.__task_executor.configure_device(device_obj)
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
                 results.append(True)
             except Exception as e: # pylint: disable=broad-except
                 LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
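The P4 handler below needs data-plane port names, while the task scheduler hands it endpoint UUIDs; hence the new find_names helper. A self-contained sketch of the lookup, with SimpleNamespace standing in for the protobuf endpoint messages:

from types import SimpleNamespace
from typing import Optional, Tuple

def find_names(uuid_a: str, uuid_b: str, device_endpoints) -> Tuple[Optional[str], Optional[str]]:
    # Single pass over the device endpoints, resolving each UUID to its name.
    endpoint_a, endpoint_b = None, None
    for endpoint in device_endpoints:
        uuid = endpoint.endpoint_id.endpoint_uuid.uuid
        if uuid == uuid_a:
            endpoint_a = endpoint.name
        elif uuid == uuid_b:
            endpoint_b = endpoint.name
    return (endpoint_a, endpoint_b)

def _endpoint(uuid: str, name: str) -> SimpleNamespace:
    return SimpleNamespace(name=name, endpoint_id=SimpleNamespace(endpoint_uuid=SimpleNamespace(uuid=uuid)))

print(find_names('u-1', 'u-2', [_endpoint('u-1', 'port-1'), _endpoint('u-2', 'port-2')]))  # ('port-1', 'port-2')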
diff --git a/src/service/service/service_handlers/p4/p4_service_handler.py b/src/service/service/service_handlers/p4/p4_service_handler.py
index 500c50378401c016a6cf30c73c78149e2097d2b8..6f2cfb5a9bc4dac991eecd14ba7b6eb1218bdaa2 100644
--- a/src/service/service/service_handlers/p4/p4_service_handler.py
+++ b/src/service/service/service_handlers/p4/p4_service_handler.py
@@ -47,7 +47,7 @@ def create_rule_set(endpoint_a, endpoint_b):
             }
         ]
     }
-)
+    )
 
 def create_rule_del(endpoint_a, endpoint_b):
     return json_config_rule_delete(
@@ -68,7 +68,17 @@ def create_rule_del(endpoint_a, endpoint_b):
             }
         ]
     }
-)
+    )
+
+def find_names(uuid_a, uuid_b, device_endpoints):
+    endpoint_a, endpoint_b = None, None
+    for endpoint in device_endpoints:
+        if endpoint.endpoint_id.endpoint_uuid.uuid == uuid_a:
+            endpoint_a = endpoint.name
+        elif endpoint.endpoint_id.endpoint_uuid.uuid == uuid_b:
+            endpoint_b = endpoint.name
+
+    return (endpoint_a, endpoint_b)
 
 class P4ServiceHandler(_ServiceHandler):
     def __init__(self,
@@ -127,12 +137,21 @@ class P4ServiceHandler(_ServiceHandler):
             device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
 
             del device.device_config.config_rules[:]
+
+            # Find names from uuids
+            (endpoint_a, endpoint_b) = find_names(matched_endpoint_uuid, endpoint_uuid, device.device_endpoints)
+            if endpoint_a is None:
+                LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+                raise Exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+            if endpoint_b is None:
+                LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
+                raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
 
             # One way
-            rule = create_rule_set(matched_endpoint_uuid, endpoint_uuid)
+            rule = create_rule_set(endpoint_a, endpoint_b)
             device.device_config.config_rules.append(ConfigRule(**rule))
             # The other way
-            rule = create_rule_set(endpoint_uuid, matched_endpoint_uuid)
+            rule = create_rule_set(endpoint_b, endpoint_a)
             device.device_config.config_rules.append(ConfigRule(**rule))
 
             self.__task_executor.configure_device(device)
@@ -189,11 +208,20 @@ class P4ServiceHandler(_ServiceHandler):
 
             del device.device_config.config_rules[:]
 
+            # Find names from uuids
+            (endpoint_a, endpoint_b) = find_names(matched_endpoint_uuid, endpoint_uuid, device.device_endpoints)
+            if endpoint_a is None:
+                LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+                raise Exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+            if endpoint_b is None:
+                LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
+                raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
+
             # One way
-            rule = create_rule_del(matched_endpoint_uuid, endpoint_uuid)
+            rule = create_rule_del(endpoint_a, endpoint_b)
             device.device_config.config_rules.append(ConfigRule(**rule))
             # The other way
-            rule = create_rule_del(endpoint_uuid, matched_endpoint_uuid)
+            rule = create_rule_del(endpoint_b, endpoint_a)
             device.device_config.config_rules.append(ConfigRule(**rule))
 
             self.__task_executor.configure_device(device)
diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py
index f55527e4756022fc4941605f54ab82b74c0937f0..fbc554aa261cbc68009258d322aa01d52bfe760d 100644
--- a/src/service/service/task_scheduler/TaskScheduler.py
+++ b/src/service/service/task_scheduler/TaskScheduler.py
@@ -130,7 +130,7 @@ class TasksScheduler:
             self._dag.add(connection_key, service_key_done)
 
         t1 = time.time()
-        LOGGER.info('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
+        LOGGER.debug('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
 
     def compose_from_service(self, service : Service, is_delete : bool = False) -> None:
         t0 = time.time()
@@ -196,11 +196,11 @@ class TasksScheduler:
                 raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item)))
 
         t1 = time.time()
-        LOGGER.info('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0))
+        LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0))
 
     def execute_all(self, dry_run : bool = False) -> None:
         ordered_task_keys = list(self._dag.static_order())
-        LOGGER.info('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))
+        LOGGER.debug('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))
 
         results = []
         for task_key in ordered_task_keys:
@@ -208,5 +208,5 @@ class TasksScheduler:
             succeeded = True if dry_run else task.execute()
             results.append(succeeded)
 
-        LOGGER.info('[execute_all] results={:s}'.format(str(results)))
+        LOGGER.debug('[execute_all] results={:s}'.format(str(results)))
         return zip(ordered_task_keys, results)
diff --git a/src/slice/client/SliceClient.py b/src/slice/client/SliceClient.py
index a3e5d649032bbf939f9ba6d812b270ca3384cc06..792a2037f0a7cb47d6f0c2e7969708425b57b3a6 100644
--- a/src/slice/client/SliceClient.py
+++ b/src/slice/client/SliceClient.py
@@ -65,3 +65,17 @@ class SliceClient:
         response = self.stub.DeleteSlice(request)
         LOGGER.debug('DeleteSlice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
+
+    @RETRY_DECORATOR
+    def OrderSliceWithSLA(self, request : Slice) -> SliceId:
+        LOGGER.debug('OrderSliceWithSLA request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.OrderSliceWithSLA(request)
+        LOGGER.debug('OrderSliceWithSLA result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RunSliceGrouping(self, request : Empty) -> Empty:
+        LOGGER.debug('RunSliceGrouping request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RunSliceGrouping(request)
+        LOGGER.debug('RunSliceGrouping result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
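With the two RPCs added to SliceClient, slice grouping can be triggered on demand. A usage sketch, assuming a reachable Slice service (host and port are placeholders; deployed components normally resolve them from environment variables):

from common.proto.context_pb2 import Empty
from slice.client.SliceClient import SliceClient

slice_client = SliceClient(host='sliceservice', port=4040)  # placeholder endpoint
slice_client.RunSliceGrouping(Empty())  # re-evaluates grouping over existing slices
slice_client.close()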
diff --git a/src/slice/requirements.in b/src/slice/requirements.in
index daef740da4729659fb3117eadff31994acdf5746..854c71a5948e91077fba4561f961083ed90b0861 100644
--- a/src/slice/requirements.in
+++ b/src/slice/requirements.in
@@ -12,5 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 #deepdiff==5.8.*
+numpy==1.23.*
+pandas==1.5.*
+questdb==1.0.1
+requests==2.27.*
+scikit-learn==1.1.*
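The new pins back the slice-grouping feature: numpy/pandas/scikit-learn for the K-Means classification, and questdb/requests for metrics export. A quick sanity check that the questdb 1.0.x ILP API matches what the exporter further down expects (the local address and table name are placeholders):

import datetime
from questdb.ingress import Sender

with Sender('127.0.0.1', 9009) as sender:  # placeholder QuestDB ILP endpoint
    sender.row(
        'tfs_slice_groups',
        symbols={'slice_uuid': 'demo', 'slice_group': 'gold'},
        columns={'slice_availability': 70.0, 'slice_capacity': 50.0},
        at=datetime.datetime.utcnow())
    sender.flush()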
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index 21d820089aad9531834187e129d893e90f3c93a8..acec3ae303266714ae7f50c5c0d78fc41d350ea1 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -24,10 +24,11 @@ from common.tools.grpc.ConfigRules import copy_config_rules
 from common.tools.grpc.Constraints import copy_constraints
 from common.tools.grpc.EndPointIds import copy_endpoint_ids
 from common.tools.grpc.ServiceIds import update_service_ids
-from common.tools.grpc.Tools import grpc_message_to_json_string
+#from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from interdomain.client.InterdomainClient import InterdomainClient
 from service.client.ServiceClient import ServiceClient
+from .slice_grouper.SliceGrouper import SliceGrouper
 
 LOGGER = logging.getLogger(__name__)
 
@@ -36,6 +37,7 @@ METRICS_POOL = MetricsPool('Slice', 'RPC')
 class SliceServiceServicerImpl(SliceServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
+        self._slice_grouper = SliceGrouper()
         LOGGER.debug('Servicer Created')
 
     def create_update(self, request : Slice) -> SliceId:
@@ -62,7 +64,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             # unable to identify the kind of slice; just update endpoints, constraints and config rules
             # update the slice in database, and return
             # pylint: disable=no-member
-            return context_client.SetSlice(slice_rw)
+            reply = context_client.SetSlice(slice_rw)
+            context_client.close()
+            return reply
 
         slice_with_uuids = context_client.GetSlice(slice_id_with_uuids)
 
@@ -80,8 +84,13 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             slice_active.CopyFrom(slice_)
             slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
             context_client.SetSlice(slice_active)
+            interdomain_client.close()
+            context_client.close()
             return slice_id
 
+        if self._slice_grouper.is_enabled:
+            grouped = self._slice_grouper.group(slice_with_uuids) # pylint: disable=unused-variable
+
         # Local domain slice
         service_id = ServiceId()
         # pylint: disable=no-member
@@ -109,13 +118,13 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
 
         for config_rule in request.slice_config.config_rules:
-            LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule)))
+            #LOGGER.debug('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule)))
             config_rule_kind = config_rule.WhichOneof('config_rule')
-            LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind)))
+            #LOGGER.debug('config_rule_kind: {:s}'.format(str(config_rule_kind)))
             if config_rule_kind != 'custom': continue
             custom = config_rule.custom
             resource_key = custom.resource_key
-            LOGGER.info('resource_key: {:s}'.format(str(resource_key)))
+            #LOGGER.debug('resource_key: {:s}'.format(str(resource_key)))
 
             # TODO: parse resource key with regular expression, e.g.:
             #    m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s)
@@ -123,21 +132,21 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             if not resource_key.endswith('/settings'): continue
 
             resource_value = json.loads(custom.resource_value)
-            LOGGER.info('resource_value: {:s}'.format(str(resource_value)))
+            #LOGGER.debug('resource_value: {:s}'.format(str(resource_value)))
 
             if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
                 if (resource_value.get('address_ip') is not None and \
                     resource_value.get('address_prefix') is not None):
                     service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
-                    LOGGER.info('is L3')
+                    #LOGGER.debug('is L3')
                 else:
                     service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
-                    LOGGER.info('is L2')
+                    #LOGGER.debug('is L2')
                 break
 
         if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
-            LOGGER.info('assume L2')
+            #LOGGER.debug('assume L2')
 
         service_client.UpdateService(service_request)
 
@@ -154,6 +163,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         slice_active.CopyFrom(slice_)
         slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
         context_client.SetSlice(slice_active)
+
+        service_client.close()
+        context_client.close()
         return slice_id
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
@@ -190,6 +202,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         try:
             _slice = context_client.GetSlice(request)
         except: # pylint: disable=bare-except
+            context_client.close()
             return Empty()
 
         if is_multi_domain(context_client, _slice.slice_endpoint_ids):
@@ -202,6 +215,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             current_slice.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member
             context_client.SetSlice(current_slice)
 
+            if self._slice_grouper.is_enabled:
+                ungrouped = self._slice_grouper.ungroup(current_slice) # pylint: disable=unused-variable
+
             service_client = ServiceClient()
             for service_id in _slice.slice_service_ids:
                 current_slice = Slice()
@@ -211,6 +227,8 @@ class SliceServiceServicerImpl(SliceServiceServicer):
                 context_client.UnsetSlice(current_slice)
                 service_client.DeleteService(service_id)
+            service_client.close()
 
         context_client.RemoveSlice(request)
+        context_client.close()
         return Empty()
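Two easy-to-miss behavioural changes above: every return path now closes the gRPC clients it opened, and grouping runs only when the SliceGrouper reports itself enabled. The intent, condensed into a sketch (a try/finally is used here for brevity; the servicer itself closes clients explicitly at each return):

def create_update_sketch(slice_grouper, context_client, slice_with_uuids):
    try:
        if slice_grouper.is_enabled:
            slice_grouper.group(slice_with_uuids)  # best-effort; the boolean result is currently unused
        # ... compose and update the underlying service request ...
    finally:
        context_client.close()  # avoid leaking channels on early returns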
diff --git a/src/slice/service/slice_grouper/Constants.py b/src/slice/service/slice_grouper/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..2edd853a2202fc64f107ea8c6688d19d6ab2692e
--- /dev/null
+++ b/src/slice/service/slice_grouper/Constants.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: define by means of settings
+SLICE_GROUPS = [
+    ('bronze',   10.0,  10.0), # Bronze   (10%,  10Gb/s)
+    ('silver',   30.0,  40.0), # Silver   (30%,  40Gb/s)
+    ('gold',     70.0,  50.0), # Gold     (70%,  50Gb/s)
+    ('platinum', 99.0, 100.0), # Platinum (99%, 100Gb/s)
+]
+SLICE_GROUP_NAMES = {slice_group[0] for slice_group in SLICE_GROUPS}
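Constants.py leaves a TODO to make the groups configurable. One possible shape for that, sketched here; the SLICE_GROUPS environment variable and its name:availability:capacity format are assumptions, not an existing TFS setting:

import os
from typing import List, Tuple

def load_slice_groups(default: List[Tuple[str, float, float]]) -> List[Tuple[str, float, float]]:
    # Hypothetical format: "bronze:10:10,silver:30:40,gold:70:50,platinum:99:100"
    raw = os.environ.get('SLICE_GROUPS')
    if raw is None: return default
    groups = []
    for item in raw.split(','):
        name, availability, capacity_gbps = item.split(':')
        groups.append((name, float(availability), float(capacity_gbps)))
    return groups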
diff --git a/src/slice/service/slice_grouper/MetricsExporter.py b/src/slice/service/slice_grouper/MetricsExporter.py
new file mode 100644
index 0000000000000000000000000000000000000000..3708641eef64e100fae18e875a4fbc4896357057
--- /dev/null
+++ b/src/slice/service/slice_grouper/MetricsExporter.py
@@ -0,0 +1,126 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, os, requests
+from typing import Any, Literal, Union
+from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module
+
+LOGGER = logging.getLogger(__name__)
+
+MAX_RETRIES = 10
+DELAY_RETRIES = 0.5
+
+MSG_EXPORT_EXECUTED   = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed'
+MSG_EXPORT_FAILED     = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...'
+MSG_REST_BAD_STATUS   = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}'
+MSG_REST_EXECUTED     = '[rest_request] Query({:s}) executed, result: {:s}'
+MSG_REST_FAILED       = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...'
+MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}'
+
+METRICSDB_HOSTNAME  = os.environ.get('METRICSDB_HOSTNAME')
+METRICSDB_ILP_PORT  = int(os.environ.get('METRICSDB_ILP_PORT'))
+METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT'))
+METRICSDB_TABLE_SLICE_GROUPS = os.environ.get('METRICSDB_TABLE_SLICE_GROUPS')
+
+COLORS = {
+    'platinum': '#E5E4E2',
+    'gold'    : '#FFD700',
+    'silver'  : '#808080',
+    'bronze'  : '#CD7F32',
+}
+DEFAULT_COLOR = '#000000' # black
+
+SQL_MARK_DELETED = "UPDATE {:s} SET is_deleted='true' WHERE slice_uuid='{:s}';"
+
+class MetricsExporter():
+    def create_table(self) -> None:
+        sql_query = ' '.join([
+            'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)),
+            ','.join([
+                'timestamp TIMESTAMP',
+                'slice_uuid SYMBOL',
+                'slice_group SYMBOL',
+                'slice_color SYMBOL',
+                'is_deleted SYMBOL',
+                'slice_availability DOUBLE',
+                'slice_capacity_center DOUBLE',
+                'slice_capacity DOUBLE',
+            ]),
+            ') TIMESTAMP(timestamp);'
+        ])
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS)))
+        except Exception as e:
+            LOGGER.warning('Table {:s} cannot be created. {:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e)))
+            raise
+
+    def export_point(
+        self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float,
+        is_center : bool = False
+    ) -> None:
+        dt_timestamp = datetime.datetime.utcnow()
+        slice_color = COLORS.get(slice_group, DEFAULT_COLOR)
+        symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color, is_deleted='false')
+        columns = dict(slice_availability=slice_availability)
+        columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity
+
+        for retry in range(MAX_RETRIES):
+            try:
+                with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender:
+                    sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp)
+                    sender.flush()
+                LOGGER.debug(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns)))
+                return
+            except (Exception, IngressError): # pylint: disable=broad-except
+                LOGGER.exception(MSG_EXPORT_FAILED.format(
+                    str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
+
+    def delete_point(self, slice_uuid : str) -> None:
+        sql_query = SQL_MARK_DELETED.format(str(METRICSDB_TABLE_SLICE_GROUPS), slice_uuid)
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.debug('Point {:s} deleted'.format(str(slice_uuid)))
+        except Exception as e:
+            LOGGER.warning('Point {:s} cannot be deleted. {:s}'.format(str(slice_uuid), str(e)))
+            raise
+
+    def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]:
+        url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT)
+        params = {'query': rest_query, 'fmt': 'json'}
+
+        for retry in range(MAX_RETRIES):
+            try:
+                response = requests.get(url, params=params)
+                status_code = response.status_code
+                if status_code not in {200}:
+                    str_content = response.content.decode('UTF-8')
+                    raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content))
+
+                json_response = response.json()
+                if 'ddl' in json_response:
+                    LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl'])))
+                    return True
+                elif 'dataset' in json_response:
+                    LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset'])))
+                    return json_response['dataset']
+
+            except Exception: # pylint: disable=broad-except
+                LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
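The exporter writes points over ILP and issues SQL through QuestDB's HTTP /exec endpoint, distinguishing DDL replies ('ddl') from query replies ('dataset'). A minimal standalone query, assuming a local QuestDB with the defaults used elsewhere in this patch:

import requests

response = requests.get(
    'http://127.0.0.1:9000/exec',  # placeholder REST endpoint
    params={'query': 'SELECT slice_uuid, slice_group FROM tfs_slice_groups LIMIT 5;', 'fmt': 'json'})
response.raise_for_status()
payload = response.json()
print(payload.get('dataset', payload.get('ddl')))  # rows for queries, 'OK' for DDL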
diff --git a/src/slice/service/slice_grouper/SliceGrouper.py b/src/slice/service/slice_grouper/SliceGrouper.py
new file mode 100644
index 0000000000000000000000000000000000000000..735d028993eb11e83138caebde1e32ebc830093f
--- /dev/null
+++ b/src/slice/service/slice_grouper/SliceGrouper.py
@@ -0,0 +1,94 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pandas, threading
+from typing import Dict, Optional, Tuple
+from sklearn.cluster import KMeans
+from common.proto.context_pb2 import Slice
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from .Constants import SLICE_GROUPS
+from .MetricsExporter import MetricsExporter
+from .Tools import (
+    add_slice_to_group, create_slice_groups, get_slice_grouping_parameters, is_slice_grouping_enabled,
+    remove_slice_from_group)
+
+LOGGER = logging.getLogger(__name__)
+
+class SliceGrouper:
+    def __init__(self) -> None:
+        self._lock = threading.Lock()
+        self._is_enabled = is_slice_grouping_enabled()
+        if not self._is_enabled: return
+
+        metrics_exporter = MetricsExporter()
+        metrics_exporter.create_table()
+
+        self._slice_groups = create_slice_groups(SLICE_GROUPS)
+
+        # Initialize and fit K-Means with the pre-defined clusters we want, i.e., one per slice group
+        df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps'])
+        k_means = KMeans(n_clusters=df_groups.shape[0])
+        k_means.fit(df_groups[['availability', 'capacity_gbps']])
+        df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])
+        self._k_means = k_means
+        self._df_groups = df_groups
+
+        self._group_mapping : Dict[str, Dict] = {
+            group['name']:{k:v for k,v in group.items() if k != 'name'}
+            for group in list(df_groups.to_dict('records'))
+        }
+
+        label_to_group = {}
+        for group_name,group_attrs in self._group_mapping.items():
+            label         = group_attrs['label']
+            availability  = group_attrs['availability']
+            capacity_gbps = group_attrs['capacity_gbps']
+            metrics_exporter.export_point(
+                group_name, group_name, availability, capacity_gbps, is_center=True)
+            label_to_group[label] = group_name
+        self._label_to_group = label_to_group
+
+    def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]:
+        with self._lock:
+            grouping_parameters = get_slice_grouping_parameters(slice_obj)
+            LOGGER.debug('[_select_group] grouping_parameters={:s}'.format(str(grouping_parameters)))
+            if grouping_parameters is None: return None
+
+            sample = pandas.DataFrame([grouping_parameters], columns=['availability', 'capacity_gbps'])
+            sample['label'] = self._k_means.predict(sample)
+            sample = sample.to_dict('records')[0] # pylint: disable=unsubscriptable-object
+            LOGGER.debug('[_select_group] sample={:s}'.format(str(sample)))
+            label         = sample['label']
+            availability  = sample['availability']
+            capacity_gbps = sample['capacity_gbps']
+            group_name    = self._label_to_group[label]
+            LOGGER.debug('[_select_group] group_name={:s}'.format(str(group_name)))
+            return group_name, availability, capacity_gbps
+
+    @property
+    def is_enabled(self): return self._is_enabled
+
+    def group(self, slice_obj : Slice) -> bool:
+        LOGGER.debug('[group] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
+        selected_group = self._select_group(slice_obj)
+        LOGGER.debug('[group] selected_group={:s}'.format(str(selected_group)))
+        if selected_group is None: return False
+        return add_slice_to_group(slice_obj, selected_group)
+
+    def ungroup(self, slice_obj : Slice) -> bool:
+        LOGGER.debug('[ungroup] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
+        selected_group = self._select_group(slice_obj)
+        LOGGER.debug('[ungroup] selected_group={:s}'.format(str(selected_group)))
+        if selected_group is None: return False
+        return remove_slice_from_group(slice_obj, selected_group)
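Classification in SliceGrouper is plain nearest-centroid: K-Means is fitted on the four group definitions (one cluster per group), and each slice's (availability, capacity) pair is predicted against those centres. The core of __init__ plus _select_group, reduced to a runnable sketch:

import pandas
from sklearn.cluster import KMeans

groups = [('bronze', 10.0, 10.0), ('silver', 30.0, 40.0), ('gold', 70.0, 50.0), ('platinum', 99.0, 100.0)]
df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity_gbps'])
k_means = KMeans(n_clusters=df_groups.shape[0]).fit(df_groups[['availability', 'capacity_gbps']])
df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])
label_to_group = dict(zip(df_groups['label'], df_groups['name']))

sample = pandas.DataFrame([(35.0, 30.0)], columns=['availability', 'capacity_gbps'])
print(label_to_group[k_means.predict(sample)[0]])  # silver: the nearest group centre to (35, 30)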
diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca957f3c7760eb65b649d22ecb5b57dee3e08dab
--- /dev/null
+++ b/src/slice/service/slice_grouper/Tools.py
@@ -0,0 +1,177 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List, Optional, Set, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.Settings import get_setting
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum
+from common.tools.context_queries.Context import create_context
+from common.tools.context_queries.Slice import get_slice
+from context.client.ContextClient import ContextClient
+from slice.service.slice_grouper.MetricsExporter import MetricsExporter
+
+SETTING_NAME_SLICE_GROUPING = 'SLICE_GROUPING'
+TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'}
+
+NO_ISOLATION = IsolationLevelEnum.NO_ISOLATION
+
+def is_slice_grouping_enabled() -> bool:
+    is_enabled = get_setting(SETTING_NAME_SLICE_GROUPING, default=None)
+    if is_enabled is None: return False
+    str_is_enabled = str(is_enabled).upper()
+    return str_is_enabled in TRUE_VALUES
+
+def create_slice_group(
+    context_uuid : str, slice_name : str, capacity_gbps : float, availability : float
+) -> Slice:
+    slice_group_obj = Slice()
+    slice_group_obj.slice_id.context_id.context_uuid.uuid = context_uuid           # pylint: disable=no-member
+    slice_group_obj.slice_id.slice_uuid.uuid = slice_name                          # pylint: disable=no-member
+    slice_group_obj.name = slice_name
+    slice_group_obj.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
+    #del slice_group_obj.slice_endpoint_ids[:]        # no endpoints initially
+    #del slice_group_obj.slice_service_ids[:]         # no sub-services
+    #del slice_group_obj.slice_subslice_ids[:]        # no sub-slices
+    #del slice_group_obj.slice_config.config_rules[:] # no config rules
+    slice_group_obj.slice_owner.owner_uuid.uuid = 'TeraFlowSDN'                    # pylint: disable=no-member
+    slice_group_obj.slice_owner.owner_string = 'TeraFlowSDN'                       # pylint: disable=no-member
+
+    constraint_sla_capacity = slice_group_obj.slice_constraints.add()              # pylint: disable=no-member
+    constraint_sla_capacity.sla_capacity.capacity_gbps = capacity_gbps
+
+    constraint_sla_availability = slice_group_obj.slice_constraints.add()          # pylint: disable=no-member
+    constraint_sla_availability.sla_availability.num_disjoint_paths = 1
+    constraint_sla_availability.sla_availability.all_active = True
+    constraint_sla_availability.sla_availability.availability = availability
+
+    constraint_sla_isolation = slice_group_obj.slice_constraints.add()             # pylint: disable=no-member
+    constraint_sla_isolation.sla_isolation.isolation_level.append(NO_ISOLATION)
+
+    return slice_group_obj
+
+def create_slice_groups(
+    slice_groups : List[Tuple[str, float, float]], context_uuid : str = DEFAULT_CONTEXT_NAME
+) -> Dict[str, SliceId]:
+    context_client = ContextClient()
+    create_context(context_client, context_uuid)
+
+    slice_group_ids : Dict[str, SliceId] = dict()
+    for slice_group in slice_groups:
+        slice_group_name = slice_group[0]
+        slice_group_obj = get_slice(context_client, slice_group_name, DEFAULT_CONTEXT_NAME)
+        if slice_group_obj is None:
+            slice_group_obj = create_slice_group(
+                DEFAULT_CONTEXT_NAME, slice_group_name, slice_group[2], slice_group[1])
+            slice_group_id = context_client.SetSlice(slice_group_obj)
+            slice_group_ids[slice_group_name] = slice_group_id
+        else:
+            slice_group_ids[slice_group_name] = slice_group_obj.slice_id
+
+    return slice_group_ids
+
+def get_slice_grouping_parameters(slice_obj : Slice) -> Optional[Tuple[float, float]]:
+    isolation_levels : Set[int]        = set()
+    availability     : Optional[float] = None
+    capacity_gbps    : Optional[float] = None
+
+    for constraint in slice_obj.slice_constraints:
+        kind = constraint.WhichOneof('constraint')
+        if kind == 'sla_isolation':
+            isolation_levels.update(constraint.sla_isolation.isolation_level)
+        elif kind == 'sla_capacity':
+            capacity_gbps = constraint.sla_capacity.capacity_gbps
+        elif kind == 'sla_availability':
+            availability = constraint.sla_availability.availability
+        else:
+            continue
+
+    no_isolation_level     = len(isolation_levels) == 0
+    single_isolation_level = len(isolation_levels) == 1
+    has_no_isolation_level = NO_ISOLATION in isolation_levels
+    can_be_grouped = no_isolation_level or (single_isolation_level and has_no_isolation_level)
+    if not can_be_grouped: return None
+    if availability  is None: return None
+    if capacity_gbps is None: return None
+    return availability, capacity_gbps
+
+def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool:
+    group_name, availability, capacity_gbps = selected_group
+    slice_uuid = slice_obj.slice_id.slice_uuid.uuid
+
+    context_client = ContextClient()
+    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    if slice_group_obj is None:
+        raise NotFoundException('Slice', group_name, extra_details='while adding to group')
+
+    del slice_group_obj.slice_endpoint_ids[:]
+    for endpoint_id in slice_obj.slice_endpoint_ids:
+        slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id)
+
+    del slice_group_obj.slice_constraints[:]
+    del slice_group_obj.slice_service_ids[:]
+
+    del slice_group_obj.slice_subslice_ids[:]
+    slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)
+
+    del slice_group_obj.slice_config.config_rules[:]
+    for config_rule in slice_obj.slice_config.config_rules:
+        group_config_rule = slice_group_obj.slice_config.config_rules.add()
+        group_config_rule.CopyFrom(config_rule)
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        TEMPLATE = '/subslice[{:s}]{:s}'
+        slice_resource_key = config_rule.custom.resource_key
+        group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key)
+        group_config_rule.custom.resource_key = group_resource_key
+
+    context_client.SetSlice(slice_group_obj)
+
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.export_point(
+        slice_uuid, group_name, availability, capacity_gbps, is_center=False)
+
+    return True
+
+def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool:
+    group_name, _, _ = selected_group
+    slice_uuid = slice_obj.slice_id.slice_uuid.uuid
+
+    context_client = ContextClient()
+    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    if slice_group_obj is None:
+        raise NotFoundException('Slice', group_name, extra_details='while removing from group')
+
+    if slice_obj.slice_id in slice_group_obj.slice_subslice_ids:
+        tmp_slice_group_obj = Slice()
+        tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id)             # pylint: disable=no-member
+
+        tmp_slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)   # pylint: disable=no-member
+
+        for endpoint_id in slice_obj.slice_endpoint_ids:
+            tmp_slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id)      # pylint: disable=no-member
+
+        for config_rule in slice_obj.slice_config.config_rules:
+            group_config_rule = tmp_slice_group_obj.slice_config.config_rules.add() # pylint: disable=no-member
+            group_config_rule.CopyFrom(config_rule)
+            if group_config_rule.WhichOneof('config_rule') != 'custom': continue
+            TEMPLATE = '/subslice[{:s}]{:s}'
+            slice_resource_key = group_config_rule.custom.resource_key
+            group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key)
+            group_config_rule.custom.resource_key = group_resource_key
+
+        context_client.UnsetSlice(tmp_slice_group_obj)
+
+        metrics_exporter = MetricsExporter()
+        metrics_exporter.delete_point(slice_uuid)
+    return True
diff --git a/src/slice/service/slice_grouper/__init__.py b/src/slice/service/slice_grouper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/slice/service/slice_grouper/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
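When a slice joins a group, Tools.py copies its config rules into the group slice with each resource key prefixed by /subslice[<uuid>], so rules from different member slices cannot collide, and the same template lets removal find them again. An illustration with placeholder values:

TEMPLATE = '/subslice[{:s}]{:s}'

slice_uuid         = 'slice-001'                            # illustrative UUID
slice_resource_key = '/device[R1]/endpoint[eth1]/settings'  # illustrative rule key
group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key)
print(group_resource_key)  # /subslice[slice-001]/device[R1]/endpoint[eth1]/settings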
diff --git a/src/slice/tests/old/Main.py b/src/slice/tests/old/Main.py
new file mode 100644
index 0000000000000000000000000000000000000000..0924f1c646e9722bf23354d0787786375663e85f
--- /dev/null
+++ b/src/slice/tests/old/Main.py
@@ -0,0 +1,98 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, pandas, random, sys, time
+#from matplotlib import pyplot as plt
+from sklearn.cluster import KMeans
+from typing import Dict, List, Tuple
+
+os.environ['METRICSDB_HOSTNAME' ] = '127.0.0.1' #'questdb-public.qdb.svc.cluster.local'
+os.environ['METRICSDB_ILP_PORT' ] = '9009'
+os.environ['METRICSDB_REST_PORT'] = '9000'
+
+from .MetricsExporter import MetricsExporter # pylint: disable=wrong-import-position
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER : logging.Logger = logging.getLogger(__name__)
+
+def get_random_slices(count : int) -> List[Tuple[str, float, float]]:
+    slices = list()
+    for i in range(count):
+        slice_name          = 'slice-{:03d}'.format(i)
+        slice_availability  = random.uniform(00.0, 99.99)
+        slice_capacity_gbps = random.uniform(0.1, 100.0)
+        slices.append((slice_name, slice_availability, slice_capacity_gbps))
+    return slices
+
+def init_kmeans() -> Tuple[KMeans, Dict[str, int]]:
+    groups = [
+        # Name, avail[0..100], bw_gbps[0..100]
+        ('bronze',   10.0,  10.0), # ('silver',   25.0,  25.0),
+        ('silver',   30.0,  40.0), # ('silver',   25.0,  25.0),
+        ('gold',     70.0,  50.0), # ('gold',     90.0,  50.0),
+        ('platinum', 99.0, 100.0),
+    ]
+    df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity'])
+
+    num_clusters = len(groups)
+    k_means = KMeans(n_clusters=num_clusters)
+    k_means.fit(df_groups[['availability', 'capacity']])
+
+    df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']])
+    mapping = {
+        group['name']:{k:v for k,v in group.items() if k != 'name'}
+        for group in list(df_groups.to_dict('records'))
+    }
+
+    return k_means, mapping
+
+def main():
+    LOGGER.info('Starting...')
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.create_table()
+
+    k_means, mapping = init_kmeans()
+    label_to_group = {}
+    for group_name,group_attrs in mapping.items():
+        label        = group_attrs['label']
+        availability = group_attrs['availability']
+        capacity     = group_attrs['capacity']
+        metrics_exporter.export_point(group_name, group_name, availability, capacity, is_center=True)
+        label_to_group[label] = group_name
+
+    slices = get_random_slices(10000)
+    for slice_ in slices:
+        sample = pandas.DataFrame([slice_[1:3]], columns=['availability', 'capacity'])
+        sample['label'] = k_means.predict(sample)
+        sample = sample.to_dict('records')[0]
+        label        = sample['label']
+        availability = sample['availability']
+        capacity     = sample['capacity']
+        group_name   = label_to_group[label]
+        metrics_exporter.export_point(slice_[0], group_name, availability, capacity, is_center=False)
+        time.sleep(0.01)
+
+    #df_silver   = df_slices[df_slices['group']==mapping['silver'  ]]
+    #df_gold     = df_slices[df_slices['group']==mapping['gold'    ]]
+    #df_platinum = df_slices[df_slices['group']==mapping['platinum']]
+    #plt.scatter(df_silver.availability,   df_silver.capacity,   s=25, c='black' )
+    #plt.scatter(df_gold.availability,     df_gold.capacity,     s=25, c='gold'  )
+    #plt.scatter(df_platinum.availability, df_platinum.capacity, s=25, c='silver')
+    #plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red' )
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, os, requests
+from typing import Any, Literal, Union
+from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module
+
+LOGGER = logging.getLogger(__name__)
+
+MAX_RETRIES = 10
+DELAY_RETRIES = 0.5
+
+MSG_EXPORT_EXECUTED   = '[export_point] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed'
+MSG_EXPORT_FAILED     = '[export_point] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...'
+MSG_REST_BAD_STATUS   = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}'
+MSG_REST_EXECUTED     = '[rest_request] Query({:s}) executed, result: {:s}'
+MSG_REST_FAILED       = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...'
+MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}'
+
+METRICSDB_HOSTNAME  = os.environ.get('METRICSDB_HOSTNAME')
+METRICSDB_ILP_PORT  = int(os.environ.get('METRICSDB_ILP_PORT'))
+METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT'))
+METRICSDB_TABLE_SLICE_GROUPS = 'slice_groups'
+
+COLORS = {
+    'platinum': '#E5E4E2',
+    'gold'    : '#FFD700',
+    'silver'  : '#808080',
+    'bronze'  : '#CD7F32',
+}
+DEFAULT_COLOR = '#000000' # black
+
+class MetricsExporter():
+    def __init__(self) -> None:
+        pass
+
+    def create_table(self) -> None:
+        sql_query = ' '.join([
+            'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)),
+            ','.join([
+                'timestamp TIMESTAMP',
+                'slice_uuid SYMBOL',
+                'slice_group SYMBOL',
+                'slice_color SYMBOL',
+                'slice_availability DOUBLE',
+                'slice_capacity_center DOUBLE',
+                'slice_capacity DOUBLE',
+            ]),
+            ') TIMESTAMP(timestamp);'
+        ])
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS)))
+        except Exception as e:
+            LOGGER.warning('Table {:s} cannot be created. {:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e)))
+            raise
+
+    def export_point(
+        self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float,
+        is_center : bool = False
+    ) -> None:
+        dt_timestamp = datetime.datetime.utcnow()
+        slice_color = COLORS.get(slice_group, DEFAULT_COLOR)
+        symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color)
+        columns = dict(slice_availability=slice_availability)
+        columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity
+
+        for retry in range(MAX_RETRIES):
+            try:
+                with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender:
+                    sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp)
+                    sender.flush()
+                LOGGER.info(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns)))
+                return
+            except (Exception, IngressError): # pylint: disable=broad-except
+                LOGGER.exception(MSG_EXPORT_FAILED.format(
+                    str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
+
+    def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]:
+        url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT)
+        params = {'query': rest_query, 'fmt': 'json'}
+
+        for retry in range(MAX_RETRIES):
+            try:
+                response = requests.get(url, params=params)
+                status_code = response.status_code
+                if status_code not in {200}:
+                    str_content = response.content.decode('UTF-8')
+                    raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content))
+
+                json_response = response.json()
+                if 'ddl' in json_response:
+                    LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl'])))
+                    return True
+                elif 'dataset' in json_response:
+                    LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset'])))
+                    return json_response['dataset']
+
+            except Exception: # pylint: disable=broad-except
+                LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
diff --git a/src/slice/tests/old/test_kmeans.py b/src/slice/tests/old/test_kmeans.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f54621c57c3bfcc1741591e5d0a87781e640420
--- /dev/null
+++ b/src/slice/tests/old/test_kmeans.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
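
One remark that applies both to the exporter script above and to the test_kmeans.py code that follows: KMeans assigns cluster labels in an arbitrary order, so neither script can hardcode which integer corresponds to which group. Both recover the name-to-label mapping by predicting on the group definitions themselves. A minimal standalone sketch of that trick, with made-up group values:

import pandas
from sklearn.cluster import KMeans

# Hypothetical group centers: (name, availability, capacity).
groups = [('silver', 25.0, 50.0), ('gold', 90.0, 10.0), ('platinum', 99.0, 100.0)]
df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity'])

k_means = KMeans(n_clusters=len(groups))
k_means.fit(df_groups[['availability', 'capacity']])

# Labels are arbitrary integers; recover them by predicting the group centers themselves.
df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']])
label_to_name = {row['label']: row['name'] for row in df_groups.to_dict('records')}

sample = pandas.DataFrame([[95.0, 80.0]], columns=['availability', 'capacity'])
print(label_to_name[k_means.predict(sample)[0]])   # expected: platinum
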
+ + +import pandas, random, sys +from matplotlib import pyplot as plt +from sklearn.cluster import KMeans +from typing import Dict, List, Tuple + +def get_random_slices(count : int) -> List[Tuple[str, float, float]]: + slices = list() + for i in range(count): + slice_name = 'slice-{:03d}'.format(i) + slice_availability = random.uniform(00.0, 99.99) + slice_capacity_gbps = random.uniform(0.1, 100.0) + slices.append((slice_name, slice_availability, slice_capacity_gbps)) + return slices + +def init_kmeans() -> Tuple[KMeans, Dict[str, int]]: + groups = [ + # Name, avail[0..100], bw_gbps[0..100] + ('silver', 25.0, 50.0), # ('silver', 25.0, 25.0), + ('gold', 90.0, 10.0), # ('gold', 90.0, 50.0), + ('platinum', 99.0, 100.0), + ] + df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity']) + + num_clusters = len(groups) + k_means = KMeans(n_clusters=num_clusters) + k_means.fit(df_groups[['availability', 'capacity']]) + + df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']]) + mapping = {group['name']:group['label'] for group in list(df_groups.to_dict('records'))} + + return k_means, mapping + +def main(): + k_means, mapping = init_kmeans() + slices = get_random_slices(500) + df_slices = pandas.DataFrame(slices, columns=['slice_uuid', 'availability', 'capacity']) + + # predict one + #sample = df_slices[['availability', 'capacity']].iloc[[0]] + #y_predicted = k_means.predict(sample) + #y_predicted + + df_slices['group'] = k_means.predict(df_slices[['availability', 'capacity']]) + + df_silver = df_slices[df_slices['group']==mapping['silver']] + df_gold = df_slices[df_slices['group']==mapping['gold']] + df_platinum = df_slices[df_slices['group']==mapping['platinum']] + + plt.scatter(df_silver.availability, df_silver.capacity, s=25, c='black' ) + plt.scatter(df_gold.availability, df_gold.capacity, s=25, c='gold' ) + plt.scatter(df_platinum.availability, df_platinum.capacity, s=25, c='silver') + plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red' ) + plt.xlabel('service-slo-availability') + plt.ylabel('service-slo-one-way-bandwidth') + #ax = plt.subplot(1, 1, 1) + #ax.set_ylim(bottom=0., top=1.) + #ax.set_xlim(left=0.) + plt.savefig('slice_grouping.png') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/slice/tests/old/test_subslices.py b/src/slice/tests/old/test_subslices.py new file mode 100644 index 0000000000000000000000000000000000000000..39ee235df0e9d263244fa14436f609397bcea84f --- /dev/null +++ b/src/slice/tests/old/test_subslices.py @@ -0,0 +1,96 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
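
One non-obvious detail in the test_subslices.py code below: SQLite parses foreign-key clauses such as ondelete='CASCADE' but ignores them unless enforcement is switched on per connection, which is exactly what the _fk_pragma_on_connect listener does. A quick standalone check of that behaviour (a sketch, not TFS code):

import sqlalchemy
from sqlalchemy import event, text

def _fk_pragma_on_connect(dbapi_con, con_record):
    # SQLite disables foreign-key enforcement by default on every new connection.
    dbapi_con.execute('pragma foreign_keys=ON')

engine = sqlalchemy.create_engine('sqlite:///:memory:', future=True)
event.listen(engine, 'connect', _fk_pragma_on_connect)

with engine.connect() as conn:
    # Prints 1 when enforcement is active; without the listener it prints 0.
    print(conn.execute(text('pragma foreign_keys')).scalar())
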
+
+
+import sqlalchemy, sys
+from sqlalchemy import Column, ForeignKey, String, event, insert
+from sqlalchemy.orm import Session, declarative_base, relationship
+from typing import Dict
+
+def _fk_pragma_on_connect(dbapi_con, con_record):
+    dbapi_con.execute('pragma foreign_keys=ON')
+
+_Base = declarative_base()
+
+class SliceModel(_Base):
+    __tablename__ = 'slice'
+
+    slice_uuid = Column(String, primary_key=True)
+
+    slice_subslices = relationship(
+        'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid')
+
+    def dump_id(self) -> Dict:
+        return {'uuid': self.slice_uuid}
+
+    def dump(self) -> Dict:
+        return {
+            'slice_id': self.dump_id(),
+            'slice_subslice_ids': [
+                slice_subslice.subslice.dump_id()
+                for slice_subslice in self.slice_subslices
+            ]
+        }
+
+class SliceSubSliceModel(_Base):
+    __tablename__ = 'slice_subslice'
+
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
+    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    slice    = relationship('SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
+
+def main():
+    engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=False, future=True)
+    event.listen(engine, 'connect', _fk_pragma_on_connect)
+
+    _Base.metadata.create_all(engine)
+
+    slice_data = [
+        {'slice_uuid': 'slice-01'},
+        {'slice_uuid': 'slice-01-01'},
+        {'slice_uuid': 'slice-01-02'},
+    ]
+
+    slice_subslices_data = [
+        {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-01'},
+        {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-02'},
+    ]
+
+    # insert
+    with engine.connect() as conn:
+        conn.execute(insert(SliceModel).values(slice_data))
+        conn.execute(insert(SliceSubSliceModel).values(slice_subslices_data))
+        conn.commit()
+
+    # read
+    with Session(engine) as session:
+        obj_list = session.query(SliceModel).all()
+        print([obj.dump() for obj in obj_list])
+        session.commit()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
+
+# Expected output:
+# [
+#     {'slice_id': {'uuid': 'slice-01'}, 'slice_subslice_ids': [
+#         {'uuid': 'slice-01-01'},
+#         {'uuid': 'slice-01-02'}
+#     ]},
+#     {'slice_id': {'uuid': 'slice-01-01'}, 'slice_subslice_ids': []},
+#     {'slice_id': {'uuid': 'slice-01-02'}, 'slice_subslice_ids': []}
+# ]
diff --git a/src/tests/benchmark/policy/deploy_specs.sh b/src/tests/benchmark/policy/deploy_specs.sh
index 12a45ef92a538ff48682fe45172a27d77b2800a0..7d408f003ce411566b9bf2435d89c72ff5db1459 100755
--- a/src/tests/benchmark/policy/deploy_specs.sh
+++ b/src/tests/benchmark/policy/deploy_specs.sh
@@ -4,7 +4,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,21 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-# Supported components are:
-#     context device automation policy service compute monitoring webui
-#     interdomain slice pathcomp dlt
-#     dbscanserving opticalattackmitigator opticalattackdetector
-#     l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
 export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
@@ -35,6 +33,60 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
-export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS=""
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
diff --git a/src/tests/benchmark/policy/tests/test_functional_bootstrap.py b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py
index 65c46b4eb5aea8d5762484d1558c14745acf83ed..ca1882aaa22ff1ac20d0b1927199a6594a6c441a 100644
--- a/src/tests/benchmark/policy/tests/test_functional_bootstrap.py
+++ b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py
@@ -13,10 +13,10 @@
 # limitations under the License.
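
As a side note on the QDB_* settings in the deploy specs above: the QuestDB tables can be inspected out-of-band through QuestDB's REST /exec endpoint, the same endpoint MetricsExporter.rest_request wraps. A minimal sketch, assuming the REST port is reachable on localhost:9000 and the table named by QDB_TABLE_MONITORING_KPIS exists:

import requests

# Assumes QuestDB REST is exposed on localhost:9000 (e.g., port-forwarded from QDB_NAMESPACE).
url = 'http://localhost:9000/exec'
params = {'query': 'SELECT count() FROM tfs_monitoring_kpis;', 'fmt': 'json'}

response = requests.get(url, params=params)
response.raise_for_status()
json_response = response.json()
# SELECT queries return a 'dataset' key; DDL statements return a 'ddl' key instead.
print(json_response.get('dataset', json_response))
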
import logging, time +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, Empty from common.proto.monitoring_pb2 import KpiDescriptorList -from common.tests.LoadScenario import load_scenario_from_descriptor -from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -27,44 +27,25 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_scenario_bootstrap( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure database is empty ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 + validate_empty_scenario(context_client) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - - # ----- Load Scenario ---------------------------------------------------------------------------------------------- - descriptor_loader = load_scenario_from_descriptor( - DESCRIPTOR_FILE, context_client, device_client, None, None) - - - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 def test_scenario_kpis_created( context_client : ContextClient, # pylint: disable=redefined-outer-name diff --git a/src/tests/benchmark/policy/tests/test_functional_cleanup.py b/src/tests/benchmark/policy/tests/test_functional_cleanup.py index e00c5ceeea6c59bf11bd2961802a9a3b805c5d2c..122526840796310519f8fe0feb8921e51467b21f 100644 --- a/src/tests/benchmark/policy/tests/test_functional_cleanup.py +++ b/src/tests/benchmark/policy/tests/test_functional_cleanup.py @@ -13,9 +13,10 @@ # limitations under the License. 
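
The rewritten functional tests in this commit all share one DescriptorLoader lifecycle, split between the bootstrap test above and the cleanup test below. As a rough sketch of the whole pattern (helper names are the ones imported by these tests; the wrapper function itself is hypothetical, and context_client/device_client are the usual test fixtures):

from common.tools.descriptor.Loader import (
    DescriptorLoader, check_descriptor_load_results, validate_empty_scenario)

def scenario_lifecycle(context_client, device_client, descriptor_file):
    # Hypothetical wrapper illustrating the sequence; the real tests split these steps.
    validate_empty_scenario(context_client)            # start from an empty database

    descriptor_loader = DescriptorLoader(
        descriptors_file=descriptor_file, context_client=context_client, device_client=device_client)
    results = descriptor_loader.process()              # load contexts/topologies/devices/links
    check_descriptor_load_results(results, descriptor_loader)
    descriptor_loader.validate()                       # DB content matches the descriptors

    # ... service/slice test steps run in between ...

    descriptor_loader.unload()                         # tear the scenario down again
    validate_empty_scenario(context_client)
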
import logging -from common.tools.descriptor.Loader import DescriptorLoader +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario from common.tools.object_factory.Context import json_context_id -from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from tests.Fixtures import context_client, device_client # pylint: disable=unused-import @@ -24,57 +25,20 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) - -def test_services_removed( +def test_scenario_cleanup( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - - # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- - for link in descriptor_loader.links: - context_client.RemoveLink(LinkId(**link['link_id'])) - - for device in descriptor_loader.devices: - device_client .DeleteDevice(DeviceId(**device['device_id'])) - - for context_uuid, topology_list in descriptor_loader.topologies.items(): - for topology in topology_list: - context_client.RemoveTopology(TopologyId(**topology['topology_id'])) - - for context in descriptor_loader.contexts: - context_client.RemoveContext(ContextId(**context['context_id'])) - - - # ----- List entities - Ensure database is empty again ------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git 
a/src/tests/benchmark/policy/tests/test_functional_create_service.py b/src/tests/benchmark/policy/tests/test_functional_create_service.py index 919f81979305831b69a82f13fbe4b70bd20ea70f..dd7761f3871db48752f313dc53e8b7d2e2c38489 100644 --- a/src/tests/benchmark/policy/tests/test_functional_create_service.py +++ b/src/tests/benchmark/policy/tests/test_functional_create_service.py @@ -13,83 +13,61 @@ # limitations under the License. import logging, random -from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextId, Empty +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.descriptor.Loader import DescriptorLoader from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from monitoring.client.MonitoringClient import MonitoringClient -from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import +from tests.Fixtures import context_client, monitoring_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM -from .Fixtures import osm_wim # pylint: disable=unused-import +from .Fixtures import osm_wim # pylint: disable=unused-import from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value -DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value - DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - # ----- Create Service --------------------------------------------------------------------------------------------- + # Create Connectivity Service service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, 
WIM_SERVICE_CONNECTION_POINTS) osm_wim.get_connectivity_service_status(service_uuid) - - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2*num_services # OLS & L3NM => (L3NM + TAPI) - - for service in response.services: - service_id = service.service_id - response = context_client.ListConnections(service_id) - LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), - grpc_message_to_json_string(response))) - assert len(response.connections) == 1 # one connection per service + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice + + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI + + for service in response.services: + service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + + if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: + assert len(response.connections) == 1 # 1 connection per service + elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: + assert len(response.connections) == 1 # 1 connection per service + else: + str_service = grpc_message_to_json_string(service) + raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) def test_scenario_kpi_values_created( diff --git a/src/tests/benchmark/policy/tests/test_functional_delete_service.py b/src/tests/benchmark/policy/tests/test_functional_delete_service.py index 6f6ca602980fb05ffafd17f44a5bc64671c4c7b0..4fffc115e6c0ea881dea637dd741f99715d28c6a 100644 --- a/src/tests/benchmark/policy/tests/test_functional_delete_service.py +++ b/src/tests/benchmark/policy/tests/test_functional_delete_service.py @@ -14,86 +14,61 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader -from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools 
import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from tests.Fixtures import context_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM -from .Fixtures import osm_wim # pylint: disable=unused-import - +from .Fixtures import osm_wim # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value -DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value - DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' - +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice - descriptor_loader = DescriptorLoader(descriptors) + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - l3nm_service_uuids = set() - response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) - assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) + service_uuids = set() for service in response.services: service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: + assert len(response.connections) == 1 # 1 connection per service service_uuid = service_id.service_uuid.uuid - l3nm_service_uuids.add(service_uuid) + service_uuids.add(service_uuid) osm_wim.conn_info[service_uuid] = {} - - response = context_client.ListConnections(service_id) - LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), - grpc_message_to_json_string(response))) - assert len(response.connections) == 1 # one connection per service + elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: + assert len(response.connections) == 1 # 1 connection per service + else: + str_service = grpc_message_to_json_string(service) + raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) # Identify service to delete 
- assert len(l3nm_service_uuids) == 1 # assume a single L3NM service has been created - l3nm_service_uuid = set(l3nm_service_uuids).pop() - - - # ----- Delete Service --------------------------------------------------------------------------------------------- - osm_wim.delete_connectivity_service(l3nm_service_uuid) - - - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies + assert len(service_uuids) == 1 # assume a single L3NM service has been created + service_uuid = set(service_uuids).pop() - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices + # Delete Connectivity Service + osm_wim.delete_connectivity_service(service_uuid) - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh index 874774e1ca50830832e842e49b6fff1114cb85d8..6c3d9db662a8232f1fcccf3835b98d69571b6337 100755 --- a/src/tests/ecoc22/deploy_specs.sh +++ b/src/tests/ecoc22/deploy_specs.sh @@ -20,7 +20,6 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator" export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. @@ -57,7 +56,7 @@ export CRDB_DATABASE="tfs" # See ./deploy/all.sh or ./deploy/crdb.sh for additional details export CRDB_DEPLOY_MODE="single" -# Disable flag for dropping database, if exists. +# Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. @@ -75,20 +74,20 @@ export NATS_REDEPLOY="" # ----- QuestDB ---------------------------------------------------------------- -# If not already set, set the namespace where QuestDB will be deployed. +# Set the namespace where QuestDB will be deployed. export QDB_NAMESPACE="qdb" -# If not already set, set the database username to be used by Monitoring. +# Set the database username to be used for QuestDB. export QDB_USERNAME="admin" -# If not already set, set the database user's password to be used by Monitoring. +# Set the database user's password to be used for QuestDB. export QDB_PASSWORD="quest" -# If not already set, set the table name to be used by Monitoring. 
-export QDB_TABLE="tfs_monitoring" +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" -## If not already set, disable flag for dropping table if exists. -#export QDB_DROP_TABLE_IF_EXISTS="" +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" -# If not already set, disable flag for re-deploying QuestDB from scratch. +# Disable flag for re-deploying QuestDB from scratch. export QDB_REDEPLOY="" diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py index 3b7b5009c0dbe9d95b4ee8e2cdbe33d39008a7a1..05691d0b274df019a87bd870fec2b9ffa3245612 100644 --- a/src/tests/ecoc22/tests/test_functional_bootstrap.py +++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py @@ -14,8 +14,8 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty -from common.tests.LoadScenario import load_scenario_from_descriptor +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -31,45 +31,15 @@ def test_scenario_bootstrap( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure database is empty ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 + validate_empty_scenario(context_client) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - - # ----- Load Scenario ---------------------------------------------------------------------------------------------- - descriptor_loader = load_scenario_from_descriptor( - DESCRIPTOR_FILE, context_client, device_client, None, None) - - - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 - - # This 
scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py index 3e8b5ea65fe8249102ba17b9d4ce3f2cf2296dda..088c19799615169bf8c60ae5a9226fe02ec0e4ff 100644 --- a/src/tests/ecoc22/tests/test_functional_cleanup.py +++ b/src/tests/ecoc22/tests/test_functional_cleanup.py @@ -14,8 +14,8 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId -from common.tools.descriptor.Loader import DescriptorLoader +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -27,64 +27,18 @@ LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) -def test_services_removed( +def test_scenario_cleanup( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- - for link in descriptor_loader.links: - context_client.RemoveLink(LinkId(**link['link_id'])) - - for device in descriptor_loader.devices: - device_client .DeleteDevice(DeviceId(**device['device_id'])) - - for context_uuid, topology_list in descriptor_loader.topologies.items(): - for topology in topology_list: - context_client.RemoveTopology(TopologyId(**topology['topology_id'])) - - for context in descriptor_loader.contexts: - context_client.RemoveContext(ContextId(**context['context_id'])) - - - # ----- List entities - Ensure 
database is empty again ------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py index 6dd4eb827c0fbafdf0bce81c7702af5fd5fe007b..dab9c7eb131434a16dad01be4fb8cd6b6b322515 100644 --- a/src/tests/ecoc22/tests/test_functional_create_service.py +++ b/src/tests/ecoc22/tests/test_functional_create_service.py @@ -14,7 +14,7 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id @@ -31,57 +31,23 @@ DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Create Service --------------------------------------------------------------------------------------------- + # Create Connectivity Service service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, 
WIM_SERVICE_CONNECTION_POINTS) osm_wim.get_connectivity_service_status(service_uuid) - - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py index 5cfdc34733d8ddc6927b52131a187fb097b36d9d..710e1a817f00f0b1664439d1c816195202a69a9d 100644 --- a/src/tests/ecoc22/tests/test_functional_delete_service.py +++ b/src/tests/ecoc22/tests/test_functional_delete_service.py @@ -14,14 +14,14 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader -from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient -from tests.Fixtures import context_client # pylint: disable=unused-import +from tests.Fixtures import context_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM -from .Fixtures import osm_wim # pylint: disable=unused-import +from .Fixtures import osm_wim # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -30,44 +30,27 @@ DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices 
and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice - service_uuids = set() response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) assert len(response.services) == 3 # 1xL2NM + 2xTAPI + service_uuids = set() for service in response.services: service_id = service.service_id - - if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM: - service_uuid = service_id.service_uuid.uuid - service_uuids.add(service_uuid) - osm_wim.conn_info[service_uuid] = {} - response = context_client.ListConnections(service_id) LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM: assert len(response.connections) == 2 # 2 connections per service (primary + backup) + service_uuid = service_id.service_uuid.uuid + service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: assert len(response.connections) == 1 # 1 connection per service else: @@ -78,34 +61,14 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p assert len(service_uuids) == 1 # assume a single L2NM service has been created service_uuid = set(service_uuids).pop() - - # ----- Delete Service --------------------------------------------------------------------------------------------- + # Delete Connectivity Service osm_wim.delete_connectivity_service(service_uuid) - - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh index 874774e1ca50830832e842e49b6fff1114cb85d8..6c3d9db662a8232f1fcccf3835b98d69571b6337 100755 --- 
a/src/tests/ofc22/deploy_specs.sh +++ b/src/tests/ofc22/deploy_specs.sh @@ -20,7 +20,6 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator" export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. @@ -57,7 +56,7 @@ export CRDB_DATABASE="tfs" # See ./deploy/all.sh or ./deploy/crdb.sh for additional details export CRDB_DEPLOY_MODE="single" -# Disable flag for dropping database, if exists. +# Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. @@ -75,20 +74,20 @@ export NATS_REDEPLOY="" # ----- QuestDB ---------------------------------------------------------------- -# If not already set, set the namespace where QuestDB will be deployed. +# Set the namespace where QuestDB will be deployed. export QDB_NAMESPACE="qdb" -# If not already set, set the database username to be used by Monitoring. +# Set the database username to be used for QuestDB. export QDB_USERNAME="admin" -# If not already set, set the database user's password to be used by Monitoring. +# Set the database user's password to be used for QuestDB. export QDB_PASSWORD="quest" -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE="tfs_monitoring" +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" -## If not already set, disable flag for dropping table if exists. -#export QDB_DROP_TABLE_IF_EXISTS="" +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" -# If not already set, disable flag for re-deploying QuestDB from scratch. +# Disable flag for re-deploying QuestDB from scratch. 
export QDB_REDEPLOY="" diff --git a/src/tests/ofc22/descriptors_emulated_xr.json b/src/tests/ofc22/descriptors_emulated_xr.json index d6a2f023422902bfc3d216771092f6081d8cf6b5..4e247bb30d4df25fa75d30a3baa94f1348c0a6d9 100644 --- a/src/tests/ofc22/descriptors_emulated_xr.json +++ b/src/tests/ofc22/descriptors_emulated_xr.json @@ -31,7 +31,10 @@ "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} ]}, "device_operational_status": 1, "device_drivers": [0], @@ -43,7 +46,10 @@ "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} ]}, "device_operational_status": 1, "device_drivers": [0], @@ -55,7 +61,10 @@ "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} ]}, "device_operational_status": 1, "device_drivers": [0], @@ -67,7 +76,10 @@ "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} ]}, "device_operational_status": 1, "device_drivers": [0], diff --git 
a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py index ad2d5703a931c933a9ab4e7162dd1985e5a33d9d..ca1882aaa22ff1ac20d0b1927199a6594a6c441a 100644 --- a/src/tests/ofc22/tests/test_functional_bootstrap.py +++ b/src/tests/ofc22/tests/test_functional_bootstrap.py @@ -16,7 +16,7 @@ import logging, time from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, Empty from common.proto.monitoring_pb2 import KpiDescriptorList -from common.tests.LoadScenario import load_scenario_from_descriptor +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -33,45 +33,15 @@ def test_scenario_bootstrap( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure database is empty ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) assert len(response.contexts) == 0 + validate_empty_scenario(context_client) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - - # ----- Load Scenario ---------------------------------------------------------------------------------------------- - descriptor_loader = load_scenario_from_descriptor( - DESCRIPTOR_FILE, context_client, device_client, None, None) - - - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0
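The rewritten bootstrap above is the pattern the remaining ofc22 tests follow: ensure the database is empty, load the descriptors, check the load results, validate the resulting scenario. A condensed sketch of that flow, using only the names imported in the diff (standalone client construction is an assumption here; in the tests, context_client and device_client arrive as pytest fixtures):

    # Sketch of the DescriptorLoader-based bootstrap used in test_scenario_bootstrap() above.
    from common.tools.descriptor.Loader import (
        DescriptorLoader, check_descriptor_load_results, validate_empty_scenario)
    from context.client.ContextClient import ContextClient
    from device.client.DeviceClient import DeviceClient

    def bootstrap_scenario(descriptors_file : str) -> None:
        context_client = ContextClient()
        device_client = DeviceClient()

        # The database must start empty...
        validate_empty_scenario(context_client)

        # ...then the descriptors are loaded...
        descriptor_loader = DescriptorLoader(
            descriptors_file=descriptors_file,
            context_client=context_client, device_client=device_client)
        results = descriptor_loader.process()

        # ...and every entity must have been accepted and be listable afterwards.
        check_descriptor_load_results(results, descriptor_loader)
        descriptor_loader.validate()

The cleanup test that follows runs the mirror image: validate() the loaded scenario, unload() it, then validate_empty_scenario().
diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py index d38b653b226639d5c8c831872a64ea1f9140ef8f..122526840796310519f8fe0feb8921e51467b21f 100644 --- a/src/tests/ofc22/tests/test_functional_cleanup.py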
+++ b/src/tests/ofc22/tests/test_functional_cleanup.py @@ -14,8 +14,8 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId -from common.tools.descriptor.Loader import DescriptorLoader +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -27,64 +27,18 @@ LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) -def test_services_removed( +def test_scenario_cleanup( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- - for link in descriptor_loader.links: - context_client.RemoveLink(LinkId(**link['link_id'])) - - for device in descriptor_loader.devices: - device_client .DeleteDevice(DeviceId(**device['device_id'])) - - for context_uuid, topology_list in descriptor_loader.topologies.items(): - for topology in topology_list: - context_client.RemoveTopology(TopologyId(**topology['topology_id'])) - - for context in descriptor_loader.contexts: - context_client.RemoveContext(ContextId(**context['context_id'])) - - - # ----- List entities - Ensure database is empty again ------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, 
context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py index 92e0a74f9d291ea49422580fbdfad2c354aeeee2..dd7761f3871db48752f313dc53e8b7d2e2c38489 100644 --- a/src/tests/ofc22/tests/test_functional_create_service.py +++ b/src/tests/ofc22/tests/test_functional_create_service.py @@ -21,7 +21,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from monitoring.client.MonitoringClient import MonitoringClient -from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import +from tests.Fixtures import context_client, monitoring_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM from .Fixtures import osm_wim # pylint: disable=unused-import from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE @@ -33,61 +33,27 @@ DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Create Service --------------------------------------------------------------------------------------------- + # Create Connectivity Service service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) osm_wim.get_connectivity_service_status(service_uuid) - - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - 
assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI for service in response.services: service_id = service.service_id @@ -104,7 +70,6 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) - def test_scenario_kpi_values_created( monitoring_client: MonitoringClient, # pylint: disable=redefined-outer-name ) -> None: diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py index 1811f219acf13b5cc17daf39f1931a6f630f997b..4fffc115e6c0ea881dea637dd741f99715d28c6a 100644 --- a/src/tests/ofc22/tests/test_functional_delete_service.py +++ b/src/tests/ofc22/tests/test_functional_delete_service.py @@ -14,10 +14,10 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader -from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from tests.Fixtures import context_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM @@ -30,44 +30,27 @@ DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices and services are created + response = 
context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice - service_uuids = set() response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI + service_uuids = set() for service in response.services: service_id = service.service_id - - if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: - service_uuid = service_id.service_uuid.uuid - service_uuids.add(service_uuid) - osm_wim.conn_info[service_uuid] = {} - response = context_client.ListConnections(service_id) LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: assert len(response.connections) == 1 # 1 connection per service + service_uuid = service_id.service_uuid.uuid + service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: assert len(response.connections) == 1 # 1 connection per service else: @@ -78,34 +61,14 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p assert len(service_uuids) == 1 # assume a single L3NM service has been created service_uuid = set(service_uuids).pop() - - # ----- Delete Service --------------------------------------------------------------------------------------------- + # Delete Connectivity Service osm_wim.delete_connectivity_service(service_uuid) - - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() diff --git a/src/tests/p4/tests/test_functional_bootstrap.py b/src/tests/p4/tests/test_functional_bootstrap.py index 
11b24adf137f0b06d1176b440a7fd93b5ad24e80..97269217336986a6a143a4a7ef94bd8b0710e9b0 100644 --- a/src/tests/p4/tests/test_functional_bootstrap.py +++ b/src/tests/p4/tests/test_functional_bootstrap.py @@ -25,6 +25,10 @@ from common.proto.context_pb2 import ConfigActionEnum, Context, ContextId, Devic from device.client.DeviceClient import DeviceClient from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from common.tools.object_factory.ConfigRule import ( + json_config_rule_set, json_config_rule_delete) + + LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -48,15 +52,18 @@ def test_prepare_scenario(context_client : ContextClient): # pylint: disable=re context_uuid = context['context_id']['context_uuid']['uuid'] LOGGER.info('Adding Context {:s}'.format(context_uuid)) response = context_client.SetContext(Context(**context)) - assert response.context_uuid.uuid == context_uuid + context_data = context_client.GetContext(response) + assert context_data.name == context_uuid for topology in TOPOLOGIES: context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] topology_uuid = topology['topology_id']['topology_uuid']['uuid'] LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) response = context_client.SetTopology(Topology(**topology)) - assert response.context_id.context_uuid.uuid == context_uuid - assert response.topology_uuid.uuid == topology_uuid +# assert response.context_id.context_uuid.uuid == context_uuid + + topology_data = context_client.GetTopology(response) + assert topology_data.name == topology_uuid context_id = json_context_id(context_uuid) @@ -81,19 +88,24 @@ def test_devices_bootstraping( device_p4_with_connect_rules = copy.deepcopy(device) device_p4_with_connect_rules['device_config']['config_rules'].extend(connect_rules) + device_p4_with_connect_rules['device_operational_status'] = \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED response = device_client.AddDevice(Device(**device_p4_with_connect_rules)) - assert response.device_uuid.uuid == device_uuid + LOGGER.info('Adding Device {:s}'.format(device_uuid)) device_p4_with_endpoints = copy.deepcopy(device) + device_p4_with_endpoints['device_id']['device_uuid']['uuid'] = response.device_uuid.uuid device_p4_with_endpoints['device_endpoints'].extend(endpoints) + for i in device_p4_with_endpoints['device_endpoints']: + i['endpoint_id']['device_id']['device_uuid']['uuid'] = response.device_uuid.uuid + + LOGGER.info('Adding Endpoints {:s}'.format(device_uuid)) device_client.ConfigureDevice(Device(**device_p4_with_endpoints)) for link in LINKS: link_uuid = link['link_id']['link_uuid']['uuid'] LOGGER.info('Adding Link {:s}'.format(link_uuid)) response = context_client.SetLink(Link(**link)) - assert response.link_uuid.uuid == link_uuid - context_client.SetLink(Link(**link)) def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure devices are created -----------------------------------------------------------------
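The notable change above is that Context/Device now assign the canonical UUIDs: the test no longer asserts that the request UUID is echoed back, but reads the created entity back (GetContext/GetTopology) and propagates response.device_uuid.uuid into the endpoint descriptors before calling ConfigureDevice. A minimal sketch of that propagation step (the wrapper function and its signature are illustrative, not part of the test code):

    # Sketch: two-step device bootstrap with server-assigned UUID propagation.
    import copy
    from common.proto.context_pb2 import Device
    from device.client.DeviceClient import DeviceClient

    def add_device_with_endpoints(
        device_client : DeviceClient, device : dict, connect_rules : list, endpoints : list
    ) -> str:
        # First request: the device plus its connect rules; the returned DeviceId
        # carries the canonical UUID, which may differ from the descriptor's one.
        device_with_rules = copy.deepcopy(device)
        device_with_rules['device_config']['config_rules'].extend(connect_rules)
        response = device_client.AddDevice(Device(**device_with_rules))
        device_uuid = response.device_uuid.uuid

        # Second request: the endpoints, each re-pointed at the returned UUID.
        device_with_endpoints = copy.deepcopy(device)
        device_with_endpoints['device_id']['device_uuid']['uuid'] = device_uuid
        device_with_endpoints['device_endpoints'].extend(endpoints)
        for endpoint in device_with_endpoints['device_endpoints']:
            endpoint['endpoint_id']['device_id']['device_uuid']['uuid'] = device_uuid
        device_client.ConfigureDevice(Device(**device_with_endpoints))
        return device_uuid

diff --git a/src/tests/p4/tests/test_functional_cleanup.py b/src/tests/p4/tests/test_functional_cleanup.py index 852f2a655dd5ba6cc80902a09d3b118b34d8da47..aad56a2104797ed7238241c4d3eda8eab3c1a907 100644 --- a/src/tests/p4/tests/test_functional_cleanup.py +++ b/src/tests/p4/tests/test_functional_cleanup.py @@ -58,7 +58,6 @@ def test_scenario_cleanup( device_uuid = device_id['device_uuid']['uuid'] LOGGER.info('Deleting Device {:s}'.format(device_uuid))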
device_client.DeleteDevice(DeviceId(**device_id)) - #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) response = context_client.ListDevices(Empty()) assert len(response.devices) == 0 @@ -72,7 +71,6 @@ def test_scenario_cleanup( LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) context_client.RemoveTopology(TopologyId(**topology_id)) context_id = json_context_id(context_uuid) - #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id))) # ----- Delete Contexts and Validate Collected Events -------------------------------------------------------------- for context in CONTEXTS: @@ -80,4 +78,3 @@ def test_scenario_cleanup( context_uuid = context_id['context_uuid']['uuid'] LOGGER.info('Deleting Context {:s}'.format(context_uuid)) context_client.RemoveContext(ContextId(**context_id)) - #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid))) diff --git a/src/tests/p4/tests/test_functional_create_service.py b/src/tests/p4/tests/test_functional_create_service.py index f160d3c6fbe4d560f821d0d70e90a2b3e44e4e8b..76a681eeaff30434663a2391509c3f266e89ecb0 100644 --- a/src/tests/p4/tests/test_functional_create_service.py +++ b/src/tests/p4/tests/test_functional_create_service.py @@ -54,15 +54,6 @@ def service_client(): def test_rules_entry( context_client : ContextClient, device_client : DeviceClient, service_client : ServiceClient): # pylint: disable=redefined-outer-name - - - for device, _, __ in DEVICES: - # Enable device - device_p4_with_operational_status = copy.deepcopy(device) - device_p4_with_operational_status['device_operational_status'] = \ - DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - device_client.ConfigureDevice(Device(**device_p4_with_operational_status)) - # ----- Create Services --------------------------------------------------------------- for service, endpoints in SERVICES: # Insert Service (table entries) @@ -72,22 +63,3 @@ def test_rules_entry( service_client.CreateService(Service(**service_p4)) service_p4['service_endpoint_ids'].extend(endpoints) service_client.UpdateService(Service(**service_p4)) - - - -""" -con_cl = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) -dev_cl = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) -srv_cl = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC')) - -for service, endpoints in SERVICES: - service_uuid = service['service_id']['service_uuid']['uuid'] - print('Creating Service {:s}'.format(service_uuid)) - service_p4 = copy.deepcopy(service) - srv_cl.CreateService(Service(**service_p4)) - #service_data = con_cl.GetService(ServiceId(**json_service_id('svc1'))) - #print('service_data = {:s}'.format(grpc_message_to_json_string(service_data))) - service_p4 = copy.deepcopy(service) - service_p4['service_endpoint_ids'].extend(endpoints) - srv_cl.UpdateService(Service(**service_p4)) -""" \ No newline at end of file diff --git a/src/tests/p4/tests/test_functional_delete_service.py b/src/tests/p4/tests/test_functional_delete_service.py index 4d637cf88d840a20f38855beb7839e2b704016d4..c5821df4ccc1caa2a1d72ed98dbfcb82e9db21b1 100644 --- a/src/tests/p4/tests/test_functional_delete_service.py +++ b/src/tests/p4/tests/test_functional_delete_service.py @@ -60,10 +60,3 @@ def test_rules_delete( print('Deleting Service 
{:s}'.format(service_uuid)) service_p4 = copy.deepcopy(service) response = service_client.DeleteService(ServiceId(**json_service_id(service_uuid, CONTEXT_ID))) - - # ----- Disable Devices --------------------------------------------------------------- - for device, _, _ in DEVICES: - device_p4_with_operational_status = copy.deepcopy(device) - device_p4_with_operational_status['device_operational_status'] = \ - DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - device_client.ConfigureDevice(Device(**device_p4_with_operational_status)) diff --git a/src/tests/tools/load_scenario/__main__.py b/src/tests/tools/load_scenario/__main__.py index 3559f778d7cf850c3bbb4f2d516f45f18423d28c..df1d5d8bf3d729a459ab6570e81e6ea05f47c981 100644 --- a/src/tests/tools/load_scenario/__main__.py +++ b/src/tests/tools/load_scenario/__main__.py @@ -13,7 +13,7 @@ # limitations under the License. import logging, sys -from common.tests.LoadScenario import load_scenario_from_descriptor +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient @@ -29,7 +29,12 @@ def main(): slice_client = SliceClient() LOGGER.info('Loading scenario...') - load_scenario_from_descriptor(sys.argv[1], context_client, device_client, service_client, slice_client) + descriptor_loader = DescriptorLoader( + descriptors_file=sys.argv[1], context_client=context_client, device_client=device_client, + service_client=service_client, slice_client=slice_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() LOGGER.info('Done!') return 0 diff --git a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json index a4109bc7b18d2855f97f5bb329d4354a04b31607..2d4ed3eaf1834f24ba966fbcaac523ca9a3afb9a 100644 --- a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json +++ b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json @@ -12,8 +12,8 @@ {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "EXT"}} ], "service_constraints": [ - {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, - {"custom": {"constraint_type": "latency[ms]", "constraint_value": "15.2"}} + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 15.2}} ], "service_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "/settings", "resource_value": { diff --git a/src/webui/grafana_dashboard_psql.json b/src/webui/grafana_db_mon_kpis_psql.json similarity index 91% rename from src/webui/grafana_dashboard_psql.json rename to src/webui/grafana_db_mon_kpis_psql.json index ec89c1647cc1086140b0bbd35354546c405ce910..750e5254ea1e4e689d92fc39cedd22a5ee619e03 100644 --- a/src/webui/grafana_dashboard_psql.json +++ b/src/webui/grafana_db_mon_kpis_psql.json @@ -33,7 +33,7 @@ { "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, "fieldConfig": { "defaults": { @@ -162,14 +162,14 @@ { "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, "format": "time_series", "group": [], "hide": false, "metricColumn": "kpi_value", "rawQuery": true, - "rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n tfs_monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_name 
IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n timestamp", + "rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n tfs_monitoring_kpis\r\nWHERE\r\n $__timeFilter(timestamp) AND device_name IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n timestamp", "refId": "A", "select": [ [ @@ -181,7 +181,7 @@ } ] ], - "table": "monitoring", + "table": "tfs_monitoring_kpis", "timeColumn": "timestamp", "where": [ { @@ -227,16 +227,16 @@ }, "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, - "definition": "SELECT DISTINCT device_name FROM tfs_monitoring;", + "definition": "SELECT DISTINCT device_name FROM tfs_monitoring_kpis;", "hide": 0, "includeAll": true, "label": "Device", "multi": true, "name": "device_name", "options": [], - "query": "SELECT DISTINCT device_name FROM tfs_monitoring;", + "query": "SELECT DISTINCT device_name FROM tfs_monitoring_kpis;", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -255,16 +255,16 @@ }, "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, - "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})", + "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring_kpis WHERE device_name IN (${device_name})", "hide": 0, "includeAll": true, "label": "EndPoint", "multi": true, "name": "endpoint_name", "options": [], - "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})", + "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring_kpis WHERE device_name IN (${device_name})", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -283,16 +283,16 @@ }, "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, - "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;", + "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring_kpis;", "hide": 0, "includeAll": true, "label": "Kpi Sample Type", "multi": true, "name": "kpi_sample_type", "options": [], - "query": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;", + "query": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring_kpis;", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -308,7 +308,7 @@ "timepicker": {}, "timezone": "utc", "title": "L3 Monitoring", - "uid": "tf-l3-monit", + "uid": "tfs-l3-monit", "version": 6, "weekStart": "" } diff --git a/src/webui/grafana_db_slc_grps_psql.json b/src/webui/grafana_db_slc_grps_psql.json new file mode 100644 index 0000000000000000000000000000000000000000..6aa7a478b6a19a83fa1677579163859eca6dd348 --- /dev/null +++ b/src/webui/grafana_db_slc_grps_psql.json @@ -0,0 +1,176 @@ +{"overwrite": true, "folderId": 0, "dashboard": + { + "id": null, + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "postgres", 
+ "uid": "questdb-slc-grp" + }, + "gridPos": { + "h": 21, + "w": 11, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "ReferenceLines": [], + "border": { + "color": "yellow", + "size": 0 + }, + "fieldSets": [ + { + "col": 6, + "color": "#C4162A", + "colorCol": 3, + "dotSize": 2, + "hidden": false, + "lineSize": 1, + "lineType": "none", + "polynomialOrder": 3, + "sizeCol": -7 + }, + { + "col": 5, + "color": "#edcd7d", + "colorCol": 3, + "dotSize": 2, + "hidden": false, + "lineSize": 1, + "lineType": "none", + "polynomialOrder": 3, + "sizeCol": -2 + } + ], + "grid": { + "color": "gray" + }, + "label": { + "col": -1, + "color": "#CCC", + "textSize": 2 + }, + "legend": { + "show": false, + "size": 0 + }, + "xAxis": { + "col": 4, + "inverted": false + }, + "xAxisExtents": { + "min": 0, + "max": 100 + }, + "xAxisTitle": { + "text": "Availability %", + "color": "white", + "textSize": 2, + "rotated": false, + "logScale": false, + "fontSize": 4, + "fontColor": "white" + }, + "xMargins": { + "lower": 30, + "upper": 10 + }, + "yAxisExtents": { + "min": 0, + "max": 100 + }, + "yAxisTitle": { + "text": "Capacity Gb/s", + "color": "#ccccdc", + "textSize": 2, + "rotated": true, + "logScale": false, + "fontSize": 4, + "fontColor": "white" + }, + "yMargins": { + "lower": 20, + "upper": 20 + } + }, + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "questdb-slc-grp" + }, + "format": "table", + "group": [], + "hide": false, + "metricColumn": "none", + "rawQuery": true, + "rawSql": "SELECT timestamp as \"time\", slice_uuid, slice_group, slice_color, slice_availability, slice_capacity, slice_capacity_center, is_deleted\nFROM tfs_slice_groups\nWHERE $__timeFilter(timestamp) AND is_deleted <> 'true';", + "refId": "A", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "column" + } + ] + ], + "table": "tfs_slice_groups", + "timeColumn": "timestamp", + "where": [] + } + ], + "title": "Slice Groups", + "transformations": [], + "type": "michaeldmoore-scatter-panel" + } + ], + "refresh": "5s", + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Slice Grouping", + "uid": "tfs-slice-grps", + "version": 2, + "weekStart": "" + } +} diff --git a/src/webui/grafana_backup_dashboard.json b/src/webui/old/grafana_backup_dashboard.json similarity index 100% rename from src/webui/grafana_backup_dashboard.json rename to src/webui/old/grafana_backup_dashboard.json diff --git a/src/webui/grafana_dashboard.json b/src/webui/old/grafana_dashboard.json similarity index 100% rename from src/webui/grafana_dashboard.json rename to src/webui/old/grafana_dashboard.json diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index d5b40b486dd7772cea29fd7d71db949b2954155c..fca1071419b3b2b61739c2a0d1d8bfa45aba5119 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -96,7 +96,9 @@ def create_app(use_config=None, web_app_root=None): app.register_blueprint(link) app.jinja_env.globals.update({ # pylint: disable=no-member + 'enumerate' : enumerate, 'json_to_list' : json_to_list, + 'round' : round, 'get_working_context' : get_working_context, 'get_working_topology': get_working_topology, }) diff --git a/src/webui/service/device/forms.py b/src/webui/service/device/forms.py index e496c4d432c7c9d02227141ea6d618984378c185..c6bacac9bc1723a020f3057fad9c9e8306c9dbca 100644 --- a/src/webui/service/device/forms.py +++ 
b/src/webui/service/device/forms.py @@ -12,21 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# external imports from flask_wtf import FlaskForm -from wtforms import StringField, SelectField, TextAreaField, SubmitField, BooleanField, Form -from wtforms.validators import DataRequired, Length, NumberRange, Regexp, ValidationError +from wtforms import StringField, SelectField, TextAreaField, SubmitField, BooleanField +from wtforms.validators import DataRequired, Length, NumberRange, ValidationError from common.proto.context_pb2 import DeviceOperationalStatusEnum -from webui.utils.form_validators import key_value_validator class AddDeviceForm(FlaskForm): device_id = StringField('ID', validators=[DataRequired(), Length(min=5)]) - device_type = SelectField('Type', choices = []) - operational_status = SelectField('Operational Status', - # choices=[(-1, 'Select...'), (0, 'Undefined'), (1, 'Disabled'), (2, 'Enabled')], - coerce=int, - validators=[NumberRange(min=0)]) + device_type = SelectField('Type') + operational_status = SelectField('Operational Status', coerce=int, validators=[NumberRange(min=0)]) device_drivers_undefined = BooleanField('UNDEFINED / EMULATED') device_drivers_openconfig = BooleanField('OPENCONFIG') device_drivers_transport_api = BooleanField('TRANSPORT_API') diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py index ce3edcfda45859c3e5db83c62fd328ee546762a5..ebf77a35ffdf9c2546ddbdd1bac0c8c1f54a2b56 100644 --- a/src/webui/service/device/routes.py +++ b/src/webui/service/device/routes.py @@ -14,16 +14,14 @@ import json from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for +from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( - ConfigActionEnum, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, Empty, TopologyId) -from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Topology import json_topology_id + ConfigActionEnum, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, Empty) +from common.tools.context_queries.Device import get_device +from common.tools.context_queries.Topology import get_topology from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient -from webui.service.device.forms import AddDeviceForm -from common.DeviceTypes import DeviceTypeEnum -from webui.service.device.forms import ConfigForm -from webui.service.device.forms import UpdateDeviceForm +from webui.service.device.forms import AddDeviceForm, ConfigForm, UpdateDeviceForm device = Blueprint('device', __name__, url_prefix='/device') context_client = ContextClient() @@ -39,17 +37,19 @@ def home(): topology_uuid = session['topology_uuid'] context_client.connect() - json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) - grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) - topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids} - grpc_devices: DeviceList = context_client.ListDevices(Empty()) + grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False) + if grpc_topology is None: + flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger') + devices = [] + else: + topo_device_uuids = {device_id.device_uuid.uuid for device_id in 
grpc_topology.device_ids} + grpc_devices: DeviceList = context_client.ListDevices(Empty()) + devices = [ + device for device in grpc_devices.devices + if device.device_id.device_uuid.uuid in topo_device_uuids + ] context_client.close() - devices = [ - device for device in grpc_devices.devices - if device.device_id.device_uuid.uuid in topo_device_uuids - ] - return render_template( 'device/home.html', devices=devices, dde=DeviceDriverEnum, dose=DeviceOperationalStatusEnum) @@ -71,23 +71,23 @@ def add(): if form.validate_on_submit(): device_obj = Device() # Device UUID: - device_obj.device_id.device_uuid.uuid = form.device_id.data + device_obj.device_id.device_uuid.uuid = form.device_id.data # pylint: disable=no-member # Device type: device_obj.device_type = str(form.device_type.data) # Device configurations: - config_rule = device_obj.device_config.config_rules.add() + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = '_connect/address' config_rule.custom.resource_value = form.device_config_address.data - config_rule = device_obj.device_config.config_rules.add() + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = '_connect/port' config_rule.custom.resource_value = form.device_config_port.data - config_rule = device_obj.device_config.config_rules.add() + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = '_connect/settings' @@ -105,20 +105,22 @@ def add(): device_obj.device_operational_status = form.operational_status.data # Device drivers: + device_drivers = list() if form.device_drivers_undefined.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED) if form.device_drivers_openconfig.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG) if form.device_drivers_transport_api.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API) if form.device_drivers_p4.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4) if form.device_drivers_ietf_network_topology.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY) if form.device_drivers_onf_tr_352.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352) if form.device_drivers_xr.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR) + device_obj.device_drivers.extend(device_drivers) # pylint: disable=no-member try: device_client.connect() @@ -126,7 +128,7 @@ def add(): device_client.close() flash(f'New device was created with ID "{response.device_uuid.uuid}".', 'success') return redirect(url_for('device.home')) - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem adding the device. 
{e.details()}', 'danger') return render_template('device/add.html', form=form, @@ -134,14 +136,15 @@ @device.route('detail/<device_uuid>', methods=['GET', 'POST']) def detail(device_uuid: str): - request = DeviceId() - request.device_uuid.uuid = device_uuid context_client.connect() - response = context_client.GetDevice(request) + device_obj = get_device(context_client, device_uuid, rw_copy=False) + if device_obj is None: + flash('Device({:s}) not found'.format(str(device_uuid)), 'danger') + device_obj = Device() context_client.close() - return render_template('device/detail.html', device=response, - dde=DeviceDriverEnum, - dose=DeviceOperationalStatusEnum) + + return render_template( + 'device/detail.html', device=device_obj, dde=DeviceDriverEnum, dose=DeviceOperationalStatusEnum) @device.get('<device_uuid>/delete') def delete(device_uuid): @@ -154,13 +157,13 @@ def delete(device_uuid): # TODO: finalize implementation request = DeviceId() - request.device_uuid.uuid = device_uuid + request.device_uuid.uuid = device_uuid # pylint: disable=no-member device_client.connect() - response = device_client.DeleteDevice(request) + device_client.DeleteDevice(request) device_client.close() flash(f'Device "{device_uuid}" deleted successfully!', 'success') - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem deleting device "{device_uuid}": {e.details()}', 'danger') current_app.logger.exception(e) return redirect(url_for('device.home')) @@ -169,25 +172,25 @@ def delete(device_uuid): def addconfig(device_uuid): form = ConfigForm() request = DeviceId() - request.device_uuid.uuid = device_uuid + request.device_uuid.uuid = device_uuid # pylint: disable=no-member context_client.connect() response = context_client.GetDevice(request) context_client.close() if form.validate_on_submit(): - device = Device() - device.CopyFrom(response) - config_rule = device.device_config.config_rules.add() + device_obj = Device() + device_obj.CopyFrom(response) + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = form.device_key_config.data config_rule.custom.resource_value = form.device_value_config.data try: device_client.connect() - response: DeviceId = device_client.ConfigureDevice(device) + response: DeviceId = device_client.ConfigureDevice(device_obj) device_client.close() flash(f'New configuration was created with ID "{response.device_uuid.uuid}".', 'success') return redirect(url_for('device.home')) - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem adding the device.
{e.details()}', 'danger') return render_template('device/addconfig.html', form=form, submit_text='Add New Configuration') @@ -203,28 +206,29 @@ def updateconfig(): def update(device_uuid): form = UpdateDeviceForm() request = DeviceId() - request.device_uuid.uuid = device_uuid + request.device_uuid.uuid = device_uuid # pylint: disable=no-member context_client.connect() response = context_client.GetDevice(request) context_client.close() # listing enum values form.update_operational_status.choices = [] - for key, value in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items(): - form.update_operational_status.choices.append((DeviceOperationalStatusEnum.Value(key), key.replace('DEVICEOPERATIONALSTATUS_', ''))) + for key, _ in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items(): + item = (DeviceOperationalStatusEnum.Value(key), key.replace('DEVICEOPERATIONALSTATUS_', '')) + form.update_operational_status.choices.append(item) form.update_operational_status.default = response.device_operational_status if form.validate_on_submit(): - device = Device() - device.CopyFrom(response) - device.device_operational_status = form.update_operational_status.data + device_obj = Device() + device_obj.CopyFrom(response) + device_obj.device_operational_status = form.update_operational_status.data try: device_client.connect() - response: DeviceId = device_client.ConfigureDevice(device) + response: DeviceId = device_client.ConfigureDevice(device_obj) device_client.close() flash(f'Status of device with ID "{response.device_uuid.uuid}" was updated.', 'success') return redirect(url_for('device.home')) - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem updating the device. {e.details()}', 'danger') return render_template('device/update.html', device=response, form=form, submit_text='Update Device') diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py index 9324ad0be6d9e72dfd3413863f0590f6ec595c3b..0fda8958e2ab2609969d2c1f68aaae61b7360b68 100644 --- a/src/webui/service/link/routes.py +++ b/src/webui/service/link/routes.py @@ -14,10 +14,10 @@ from flask import render_template, Blueprint, flash, session, redirect, url_for -from common.proto.context_pb2 import Empty, LinkId, LinkList, TopologyId +from common.proto.context_pb2 import Empty, Link, LinkList from common.tools.context_queries.EndPoint import get_endpoint_names -from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Topology import json_topology_id +from common.tools.context_queries.Link import get_link +from common.tools.context_queries.Topology import get_topology from context.client.ContextClient import ContextClient @@ -33,20 +33,21 @@ def home(): context_uuid = session['context_uuid'] topology_uuid = session['topology_uuid'] + links, endpoint_ids = list(), list() + device_names, endpoints_data = dict(), dict() + context_client.connect() - json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) - grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) - topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids} - grpc_links: LinkList = context_client.ListLinks(Empty()) - - endpoint_ids = [] - links = [] - for link_ in grpc_links.links: - if link_.link_id.link_uuid.uuid not in topo_link_uuids: continue - links.append(link_) - endpoint_ids.extend(link_.link_endpoint_ids) - - device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) + 
grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False) + if grpc_topology is None: + flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger') + else: + topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids} + grpc_links: LinkList = context_client.ListLinks(Empty()) + for link_ in grpc_links.links: + if link_.link_id.link_uuid.uuid not in topo_link_uuids: continue + links.append(link_) + endpoint_ids.extend(link_.link_endpoint_ids) + device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) context_client.close() return render_template('link/home.html', links=links, device_names=device_names, endpoints_data=endpoints_data) @@ -54,10 +55,13 @@ def home(): @link.route('detail/<link_uuid>', methods=('GET', 'POST')) def detail(link_uuid: str): - request = LinkId() - request.link_uuid.uuid = link_uuid # pylint: disable=no-member context_client.connect() - response = context_client.GetLink(request) - device_names, endpoints_data = get_endpoint_names(context_client, response.link_endpoint_ids) + link_obj = get_link(context_client, link_uuid, rw_copy=False) + if link_obj is None: + flash('Link({:s}) not found'.format(str(link_uuid)), 'danger') + link_obj = Link() + device_names, endpoints_data = dict(), dict() + else: + device_names, endpoints_data = get_endpoint_names(context_client, link_obj.link_endpoint_ids) context_client.close() - return render_template('link/detail.html',link=response, device_names=device_names, endpoints_data=endpoints_data) + return render_template('link/detail.html',link=link_obj, device_names=device_names, endpoints_data=endpoints_data) diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py new file mode 100644 index 0000000000000000000000000000000000000000..4e0020b04f33152de382f5b93af9735f8d737f92 --- /dev/null +++ b/src/webui/service/load_gen/forms.py @@ -0,0 +1,42 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
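+
+# WTForms model backing the WebUI "Load Generator" page. On GET, the route in
+# webui/service/load_gen/routes.py (below) fills these fields from
+# LoadGeneratorClient.GetStatus(); on POST, the selected values are packed
+# into a load_generator Parameters message and sent through Start().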
+
+from flask_wtf import FlaskForm
+from wtforms import BooleanField, FloatField, IntegerField, StringField, SubmitField
+from wtforms.validators import DataRequired, NumberRange
+
+class LoadGenForm(FlaskForm):
+    num_requests  = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)])
+    num_generated = IntegerField('Num Generated', default=0, render_kw={'readonly': True})
+
+    request_type_service_l2nm = BooleanField('Service L2NM', default=False)
+    request_type_service_l3nm = BooleanField('Service L3NM', default=False)
+    request_type_service_mw   = BooleanField('Service MW',   default=False)
+    request_type_service_tapi = BooleanField('Service TAPI', default=False)
+    request_type_slice_l2nm   = BooleanField('Slice L2NM',   default=True)
+    request_type_slice_l3nm   = BooleanField('Slice L3NM',   default=False)
+
+    offered_load       = FloatField('Offered Load [Erlang]',        default=50, validators=[NumberRange(min=0.0)])
+    holding_time       = FloatField('Holding Time [seconds]',       default=10, validators=[NumberRange(min=0.0)])
+    inter_arrival_time = FloatField('Inter Arrival Time [seconds]', default=0,  validators=[NumberRange(min=0.0)])
+
+    do_teardown = BooleanField('Do Teardown', default=True)
+
+    record_to_dlt = BooleanField('Record to DLT', default=False)
+    dlt_domain_id = StringField('DLT Domain Id', default='')
+
+    infinite_loop = BooleanField('Infinite Loop', default=False, render_kw={'disabled': True})
+    running       = BooleanField('Running',       default=False, render_kw={'disabled': True})
+
+    submit = SubmitField('Start/Stop')
diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py
index 3118b6de0e061adac65be178163623cd2d1d8fff..5f47f06b0ff59ad1383aab94caa41adc08440c87 100644
--- a/src/webui/service/load_gen/routes.py
+++ b/src/webui/service/load_gen/routes.py
@@ -12,34 +12,115 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from flask import render_template, Blueprint, flash
+from typing import Any, Optional
+from flask import redirect, render_template, Blueprint, flash, url_for
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
 from load_generator.client.LoadGeneratorClient import LoadGeneratorClient
+from .forms import LoadGenForm
 
 load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen')
 
-@load_gen.route('start', methods=['GET'])
-def start():
+def set_properties(field, data : Any, readonly : Optional[bool] = None, disabled : Optional[bool] = None) -> None:
+    if not hasattr(field, 'render_kw'):
+        field.render_kw = dict()
+    elif field.render_kw is None:
+        field.render_kw = dict()
+
+    if readonly is not None:
+        field.render_kw['readonly'] = readonly
+    if disabled is not None:
+        field.render_kw['disabled'] = disabled
+
+    if (readonly is not None and readonly) or (disabled is not None and disabled):
+        field.data = data
+
+@load_gen.route('home', methods=['GET'])
+def home():
     load_gen_client = LoadGeneratorClient()
-    try:
-        load_gen_client.connect()
-        load_gen_client.Start(Empty())
-        load_gen_client.close()
-        flash('Load Generator Started.', 'success')
-    except Exception as e: # pylint: disable=broad-except
-        flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger')
-    return render_template('main/debug.html')
+    load_gen_client.connect()
+    status = load_gen_client.GetStatus(Empty())
+    load_gen_client.close()
+
+    request_types = status.parameters.request_types
+    _request_type_service_l2nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM in request_types
+    _request_type_service_l3nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM in request_types
+    _request_type_service_mw   = RequestTypeEnum.REQUESTTYPE_SERVICE_MW   in request_types
+    _request_type_service_tapi = RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI in request_types
+    _request_type_slice_l2nm   = RequestTypeEnum.REQUESTTYPE_SLICE_L2NM   in request_types
+    _request_type_slice_l3nm   = RequestTypeEnum.REQUESTTYPE_SLICE_L3NM   in request_types
+
+    _offered_load       = round(status.parameters.offered_load      , ndigits=4)
+    _holding_time       = round(status.parameters.holding_time      , ndigits=4)
+    _inter_arrival_time = round(status.parameters.inter_arrival_time, ndigits=4)
+
+    form = LoadGenForm()
+    set_properties(form.num_requests             , status.parameters.num_requests , readonly=status.running)
+    set_properties(form.offered_load             , _offered_load                  , readonly=status.running)
+    set_properties(form.holding_time             , _holding_time                  , readonly=status.running)
+    set_properties(form.inter_arrival_time       , _inter_arrival_time            , readonly=status.running)
+    set_properties(form.do_teardown              , status.parameters.do_teardown  , disabled=status.running)
+    set_properties(form.record_to_dlt            , status.parameters.record_to_dlt, disabled=status.running)
+    set_properties(form.dlt_domain_id            , status.parameters.dlt_domain_id, readonly=status.running)
+    set_properties(form.request_type_service_l2nm, _request_type_service_l2nm     , disabled=status.running)
+    set_properties(form.request_type_service_l3nm, _request_type_service_l3nm     , disabled=status.running)
+    set_properties(form.request_type_service_mw  , _request_type_service_mw       , disabled=status.running)
+    set_properties(form.request_type_service_tapi, _request_type_service_tapi     , disabled=status.running)
+    set_properties(form.request_type_slice_l2nm  , _request_type_slice_l2nm       , disabled=status.running)
+    set_properties(form.request_type_slice_l3nm  , _request_type_slice_l3nm       , disabled=status.running)
+    set_properties(form.num_generated            , status.num_generated           , disabled=True)
+    set_properties(form.infinite_loop            , status.infinite_loop           , disabled=True)
+    set_properties(form.running                  , status.running                 , disabled=True)
 
-@load_gen.route('stop', methods=['GET'])
+    form.submit.label.text = 'Stop' if status.running else 'Start'
+    form_action = url_for('load_gen.stop') if status.running else url_for('load_gen.start')
+    return render_template('load_gen/home.html', form=form, form_action=form_action)
+
+@load_gen.route('start', methods=['POST'])
+def start():
+    form = LoadGenForm()
+    if form.validate_on_submit():
+        try:
+            load_gen_params = Parameters()
+            load_gen_params.num_requests       = form.num_requests.data
+            load_gen_params.offered_load       = form.offered_load.data
+            load_gen_params.holding_time       = form.holding_time.data
+            load_gen_params.inter_arrival_time = form.inter_arrival_time.data
+            load_gen_params.do_teardown        = form.do_teardown.data
+            load_gen_params.dry_mode           = False
+            load_gen_params.record_to_dlt      = form.record_to_dlt.data
+            load_gen_params.dlt_domain_id      = form.dlt_domain_id.data
+
+            del load_gen_params.request_types[:] # pylint: disable=no-member
+            request_types = list()
+            if form.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM)
+            if form.request_type_service_l3nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM)
+            if form.request_type_service_mw  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_MW  )
+            if form.request_type_service_tapi.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI)
+            if form.request_type_slice_l2nm  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L2NM  )
+            if form.request_type_slice_l3nm  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L3NM  )
+            load_gen_params.request_types.extend(request_types) # pylint: disable=no-member
+
+            load_gen_client = LoadGeneratorClient()
+            load_gen_client.connect()
+            load_gen_client.Start(load_gen_params)
+            load_gen_client.close()
+            flash('Load Generator Started.', 'success')
+        except Exception as e: # pylint: disable=broad-except
+            flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger')
+    return redirect(url_for('load_gen.home'))
+
+@load_gen.route('stop', methods=['POST'])
 def stop():
-    load_gen_client = LoadGeneratorClient()
-    try:
-        load_gen_client.connect()
-        load_gen_client.Stop(Empty())
-        load_gen_client.close()
-        flash('Load Generator Stoped.', 'success')
-    except Exception as e: # pylint: disable=broad-except
-        flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger')
-
-    return render_template('main/debug.html')
+    form = LoadGenForm()
+    if form.validate_on_submit():
+        try:
+            load_gen_client = LoadGeneratorClient()
+            load_gen_client.connect()
+            load_gen_client.Stop(Empty())
+            load_gen_client.close()
+            flash('Load Generator Stopped.', 'success')
+        except Exception as e: # pylint: disable=broad-except
+            flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger')
+    return redirect(url_for('load_gen.home'))
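Side note on the gRPC client handling above: home() calls connect()/GetStatus()/close() with no try/finally, so a failing RPC leaks the channel. A minimal sketch of a guard, assuming the closing_load_gen_client helper (hypothetical, not part of this patch):

from contextlib import contextmanager

from common.proto.context_pb2 import Empty
from load_generator.client.LoadGeneratorClient import LoadGeneratorClient

@contextmanager
def closing_load_gen_client():
    # Hypothetical helper: yields a connected client and guarantees close(),
    # even when the wrapped RPC raises.
    client = LoadGeneratorClient()
    client.connect()
    try:
        yield client
    finally:
        client.close()

# Usage sketch:
#   with closing_load_gen_client() as client:
#       status = client.GetStatus(Empty())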
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 38d13aad562f3e55490952db84ef784f87697739..32cefddf3b2a8251623b60fd9fc039588cd6b9bb 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import base64, json, logging, re
+import base64, json, logging #, re
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
 from common.proto.context_pb2 import ContextList, Empty, TopologyId, TopologyList
 from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
@@ -34,6 +34,8 @@ slice_client = SliceClient()
 
 LOGGER = logging.getLogger(__name__)
 
+DESCRIPTOR_LOADER_NUM_WORKERS = 10
+
 def process_descriptors(descriptors):
     try:
         descriptors_file = request.files[descriptors.name]
@@ -43,7 +45,7 @@ def process_descriptors(descriptors):
         flash(f'Unable to load descriptor file: {str(e)}', 'danger')
         return
 
-    descriptor_loader = DescriptorLoader(descriptors)
+    descriptor_loader = DescriptorLoader(descriptors, num_workers=DESCRIPTOR_LOADER_NUM_WORKERS)
     results = descriptor_loader.process()
     for message,level in compose_notifications(results):
         if level == 'error': LOGGER.warning('ERROR message={:s}'.format(str(message)))
@@ -53,7 +55,7 @@ def process_descriptors(descriptors):
 def home():
     context_client.connect()
     device_client.connect()
-    context_topology_form: ContextTopologyForm = ContextTopologyForm()
+    context_topology_form = ContextTopologyForm()
     context_topology_form.context_topology.choices.append(('', 'Select...'))
 
     contexts : ContextList = context_client.ListContexts(Empty())
@@ -85,6 +87,10 @@ def home():
                 #session['topology_name'] = topology_name
                 MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.'
                 flash(MSG, 'success')
+
+                context_client.close()
+                device_client.close()
+
                 return redirect(url_for('main.home'))
 
             #match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid)
@@ -99,7 +105,7 @@ def home():
     if 'context_topology_uuid' in session:
         context_topology_form.context_topology.data = session['context_topology_uuid']
 
-    descriptor_form: DescriptorForm = DescriptorForm()
+    descriptor_form = DescriptorForm()
     try:
         if descriptor_form.validate_on_submit():
             process_descriptors(descriptor_form.descriptors)
diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py
index ee9b092ae6828d7e2a82c66b1461c2f90853a803..defbe2cb003cc97830d6ec24db01bf8734a7f530 100644
--- a/src/webui/service/service/routes.py
+++ b/src/webui/service/service/routes.py
@@ -14,8 +14,11 @@
 
 import grpc
 from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for
-from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection
+from common.proto.context_pb2 import (
+    IsolationLevelEnum, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection)
+from common.tools.context_queries.Context import get_context
 from common.tools.context_queries.EndPoint import get_endpoint_names
+from common.tools.context_queries.Service import get_service
 from context.client.ContextClient import ContextClient
 from service.client.ServiceClient import ServiceClient
 
@@ -26,93 +29,94 @@ service_client = ServiceClient()
 
 @service.get('/')
 def home():
-    # flash('This is an info message', 'info')
-    # flash('This is a danger message', 'danger')
-
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-    request = ContextId()
-    request.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     context_client.connect()
-    try:
-        service_list = context_client.ListServices(request)
-        # print(service_list)
-        services = service_list.services
-        context_found = True
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise
-        if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
-        services = []
-        context_found = False
-
-    if context_found:
-        endpoint_ids = []
-        for service_ in services:
-            endpoint_ids.extend(service_.service_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+
+    context_obj = get_context(context_client, context_uuid, rw_copy=False)
+    if context_obj is None:
+        flash('Context({:s}) not found'.format(str(context_uuid)), 'danger')
+        services, device_names, endpoints_data = list(), dict(), dict()
     else:
-        device_names, endpoints_data = [],[]
+        try:
+            services = context_client.ListServices(context_obj.context_id)
+            services = services.services
+        except grpc.RpcError as e:
+            if e.code() != grpc.StatusCode.NOT_FOUND: raise
+            if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
+            services, device_names, endpoints_data = list(), dict(), dict()
+        else:
+            endpoint_ids = list()
+            for service_ in services:
+                endpoint_ids.extend(service_.service_endpoint_ids)
+            device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
 
     context_client.close()
     return render_template(
         'service/home.html', services=services, device_names=device_names, endpoints_data=endpoints_data,
-        context_not_found=not context_found, ste=ServiceTypeEnum, sse=ServiceStatusEnum)
+        ste=ServiceTypeEnum, sse=ServiceStatusEnum)
 
 @service.route('add', methods=['GET', 'POST'])
 def add():
     flash('Add service route called', 'danger')
     raise NotImplementedError()
-    return render_template('service/home.html')
+    #return render_template('service/home.html')
 
 @service.get('<path:service_uuid>/detail')
 def detail(service_uuid: str):
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-
-    request: ServiceId = ServiceId()
-    request.service_uuid.uuid = service_uuid
-    request.context_id.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     try:
         context_client.connect()
-        response: Service = context_client.GetService(request)
-        connections: Connection = context_client.ListConnections(request)
-        connections = connections.connections
 
-        endpoint_ids = []
-        endpoint_ids.extend(response.service_endpoint_ids)
-        for connection in connections:
-            endpoint_ids.extend(connection.path_hops_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+        endpoint_ids = list()
+        service_obj = get_service(context_client, service_uuid, rw_copy=False)
+        if service_obj is None:
+            flash('Context({:s})/Service({:s}) not found'.format(str(context_uuid), str(service_uuid)), 'danger')
+            service_obj = Service()
+            connections = list()
+        else:
+            endpoint_ids.extend(service_obj.service_endpoint_ids)
+            connections: Connection = context_client.ListConnections(service_obj.service_id)
+            connections = connections.connections
+            for connection in connections: endpoint_ids.extend(connection.path_hops_endpoint_ids)
+
+        if len(endpoint_ids) > 0:
+            device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+        else:
+            device_names, endpoints_data = dict(), dict()
 
         context_client.close()
+
+        return render_template(
+            'service/detail.html', service=service_obj, connections=connections, device_names=device_names,
+            endpoints_data=endpoints_data, ste=ServiceTypeEnum, sse=ServiceStatusEnum, ile=IsolationLevelEnum)
     except Exception as e:
         flash('The system encountered an error and cannot show the details of this service.', 'warning')
         current_app.logger.exception(e)
         return redirect(url_for('service.home'))
-    return render_template(
-        'service/detail.html', service=response, connections=connections, device_names=device_names,
-        endpoints_data=endpoints_data, ste=ServiceTypeEnum, sse=ServiceStatusEnum)
 
 @service.get('<path:service_uuid>/delete')
 def delete(service_uuid: str):
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
+    context_uuid = session['context_uuid']
 
     try:
         request = ServiceId()
         request.service_uuid.uuid = service_uuid
         request.context_id.context_uuid.uuid = context_uuid
         service_client.connect()
-        response = service_client.DeleteService(request)
+        service_client.DeleteService(request)
         service_client.close()
 
         flash('Service "{:s}" deleted successfully!'.format(service_uuid), 'success')
diff --git a/src/webui/service/slice/routes.py b/src/webui/service/slice/routes.py
index 222508418a187bcab18f7d44fccf896c917c6821..cd1b672d5c1014b0e8aa301ed7b5a1f6d910f6df 100644
--- a/src/webui/service/slice/routes.py
+++ b/src/webui/service/slice/routes.py
@@ -11,11 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+
 import grpc
 from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for
-from common.proto.context_pb2 import ContextId, Slice, SliceId, SliceStatusEnum
+from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum
+from common.tools.context_queries.Context import get_context
 from common.tools.context_queries.EndPoint import get_endpoint_names
+from common.tools.context_queries.Slice import get_slice
 from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 
@@ -26,92 +28,88 @@ slice_client = SliceClient()
 
 @slice.get('/')
 def home():
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-    request = ContextId()
-    request.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     context_client.connect()
-    try:
-        slice_list = context_client.ListSlices(request)
-        slices = slice_list.slices
-        context_found = True
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise
-        if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
-        slices = []
-        context_found = False
-
-    if context_found:
-        endpoint_ids = []
-        for slice_ in slices:
-            endpoint_ids.extend(slice_.slice_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+
+    context_obj = get_context(context_client, context_uuid, rw_copy=False)
+    if context_obj is None:
+        flash('Context({:s}) not found'.format(str(context_uuid)), 'danger')
+        slices, device_names, endpoints_data = list(), dict(), dict()
     else:
-        device_names, endpoints_data = [],[]
+        try:
+            slices = context_client.ListSlices(context_obj.context_id)
+            slices = slices.slices
+        except grpc.RpcError as e:
+            if e.code() != grpc.StatusCode.NOT_FOUND: raise
+            if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
+            slices, device_names, endpoints_data = list(), dict(), dict()
+        else:
+            endpoint_ids = list()
+            for slice_ in slices:
+                endpoint_ids.extend(slice_.slice_endpoint_ids)
+            device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
 
     context_client.close()
-
     return render_template(
         'slice/home.html', slices=slices, device_names=device_names, endpoints_data=endpoints_data,
-        context_not_found=not context_found, sse=SliceStatusEnum)
+        sse=SliceStatusEnum)
 
 @slice.route('add', methods=['GET', 'POST'])
 def add():
     flash('Add slice route called', 'danger')
     raise NotImplementedError()
-    return render_template('slice/home.html')
+    #return render_template('slice/home.html')
 
 @slice.get('<path:slice_uuid>/detail')
 def detail(slice_uuid: str):
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-
-    request: SliceId = SliceId()
-    request.slice_uuid.uuid = slice_uuid
-    request.context_id.context_uuid.uuid = context_uuid
-    req = ContextId()
-    req.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     try:
         context_client.connect()
-        response: Slice = context_client.GetSlice(request)
-        services = context_client.ListServices(req)
 
-        endpoint_ids = []
-        endpoint_ids.extend(response.slice_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+        slice_obj = get_slice(context_client, slice_uuid, rw_copy=False)
+        if slice_obj is None:
+            flash('Context({:s})/Slice({:s}) not found'.format(str(context_uuid), str(slice_uuid)), 'danger')
+            slice_obj = Slice()
+            device_names, endpoints_data = dict(), dict()
+        else:
+            device_names, endpoints_data = get_endpoint_names(context_client, slice_obj.slice_endpoint_ids)
 
         context_client.close()
+
+        return render_template(
+            'slice/detail.html', slice=slice_obj, device_names=device_names, endpoints_data=endpoints_data,
+            sse=SliceStatusEnum, ile=IsolationLevelEnum)
     except Exception as e:
         flash('The system encountered an error and cannot show the details of this slice.', 'warning')
         current_app.logger.exception(e)
         return redirect(url_for('slice.home'))
-    return render_template(
-        'slice/detail.html', slice=response, device_names=device_names, endpoints_data=endpoints_data,
-        sse=SliceStatusEnum, services=services)
 
-#@slice.get('<path:slice_uuid>/delete')
-#def delete(slice_uuid: str):
-#    context_uuid = session.get('context_uuid', '-')
-#    if context_uuid == "-":
-#        flash("Please select a context!", "warning")
-#        return redirect(url_for("main.home"))
-#
-#    try:
-#        request = SliceId()
-#        request.slice_uuid.uuid = slice_uuid
-#        request.context_id.context_uuid.uuid = context_uuid
-#        slice_client.connect()
-#        response = slice_client.DeleteSlice(request)
-#        slice_client.close()
-#
-#        flash('Slice "{:s}" deleted successfully!'.format(slice_uuid), 'success')
-#    except Exception as e:
-#        flash('Problem deleting slice "{:s}": {:s}'.format(slice_uuid, str(e.details())), 'danger')
-#        current_app.logger.exception(e)
-#    return redirect(url_for('slice.home'))
+
+@slice.get('<path:slice_uuid>/delete')
+def delete(slice_uuid: str):
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
+        flash("Please select a context!", "warning")
+        return redirect(url_for("main.home"))
+    context_uuid = session['context_uuid']
+
+    try:
+        request = SliceId()
+        request.slice_uuid.uuid = slice_uuid
+        request.context_id.context_uuid.uuid = context_uuid
+        slice_client.connect()
+        slice_client.DeleteSlice(request)
+        slice_client.close()
+
+        flash('Slice "{:s}" deleted successfully!'.format(slice_uuid), 'success')
+    except Exception as e:
+        flash('Problem deleting slice "{:s}": {:s}'.format(slice_uuid, str(e)), 'danger')
+        current_app.logger.exception(e)
+    return redirect(url_for('slice.home'))
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 0aa022f1453eaa33a67212174cf9687a942b10f0..1dfa3687198d8a33db346ba2bbcd2989f6f109bb 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -86,10 +86,16 @@
-
+
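The service and slice routes above lean on the common.tools.context_queries helpers (get_context, get_service, get_slice) returning None when the requested object does not exist, instead of surfacing the NOT_FOUND RpcError to the view. A sketch of the presumed shape of such a helper, illustrative only, the real implementations live under common/tools/context_queries/:

import grpc
from common.proto.context_pb2 import Service, ServiceId

def get_service_sketch(context_client, service_uuid : str, rw_copy : bool = False):
    try:
        request = ServiceId()
        request.service_uuid.uuid = service_uuid
        ro_service = context_client.GetService(request)
        if not rw_copy: return ro_service
        # rw_copy=True hands back a mutable copy the caller may edit freely.
        rw_service = Service()
        rw_service.CopyFrom(ro_service)
        return rw_service
    except grpc.RpcError as e:
        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
        return None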
diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html
new file mode 100644
index 0000000000000000000000000000000000000000..d58f42601925ca438ab9d9f20b32f94960b5cada
--- /dev/null
+++ b/src/webui/service/templates/load_gen/home.html
@@ -0,0 +1,155 @@
+<!-- ... -->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Load Generator</h1>
+    <br />
+    <form method="POST" action="{{ form_action }}">
+        {{ form.hidden_tag() }}
+
+        <div class="row mb-3">
+            {{ form.num_requests.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form.num_requests.errors %}
+                    {{ form.num_requests(class="form-control is-invalid") }}
+                    <div class="invalid-feedback">
+                        {% for error in form.num_requests.errors %}{{ error }}{% endfor %}
+                    </div>
+                {% else %}
+                    {{ form.num_requests(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            {{ form.num_generated.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form.num_generated.errors %}
+                    {{ form.num_generated(class="form-control is-invalid") }}
+                    <div class="invalid-feedback">
+                        {% for error in form.num_generated.errors %}{{ error }}{% endfor %}
+                    </div>
+                {% else %}
+                    {{ form.num_generated(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            <div class="col-sm-12">
+                Service Types:
+                <br />
+                {{ form.request_type_slice_l2nm   }} {{ form.request_type_slice_l2nm  .label(class="col-sm-3 col-form-label") }}
+                {{ form.request_type_slice_l3nm   }} {{ form.request_type_slice_l3nm  .label(class="col-sm-3 col-form-label") }}
+                <br />
+                {{ form.request_type_service_l2nm }} {{ form.request_type_service_l2nm.label(class="col-sm-3 col-form-label") }}
+                {{ form.request_type_service_l3nm }} {{ form.request_type_service_l3nm.label(class="col-sm-3 col-form-label") }}
+                <br />
+                {{ form.request_type_service_mw   }} {{ form.request_type_service_mw  .label(class="col-sm-3 col-form-label") }}
+                {{ form.request_type_service_tapi }} {{ form.request_type_service_tapi.label(class="col-sm-3 col-form-label") }}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            {{ form.offered_load.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form.offered_load.errors %}
+                    {{ form.offered_load(class="form-control is-invalid") }}
+                    <div class="invalid-feedback">
+                        {% for error in form.offered_load.errors %}{{ error }}{% endfor %}
+                    </div>
+                {% else %}
+                    {{ form.offered_load(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            {{ form.holding_time.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form.holding_time.errors %}
+                    {{ form.holding_time(class="form-control is-invalid") }}
+                    <div class="invalid-feedback">
+                        {% for error in form.holding_time.errors %}{{ error }}{% endfor %}
+                    </div>
+                {% else %}
+                    {{ form.holding_time(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            {{ form.inter_arrival_time.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form.inter_arrival_time.errors %}
+                    {{ form.inter_arrival_time(class="form-control is-invalid") }}
+                    <div class="invalid-feedback">
+                        {% for error in form.inter_arrival_time.errors %}{{ error }}{% endfor %}
+                    </div>
+                {% else %}
+                    {{ form.inter_arrival_time(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            <div class="col-sm-12">
+                {{ form.do_teardown }} {{ form.do_teardown.label(class="col-sm-3 col-form-label") }}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            <div class="col-sm-12">
+                DLT Settings:
+                <br />
+                {{ form.record_to_dlt }} {{ form.record_to_dlt.label(class="col-sm-3 col-form-label") }}
+                <br />
+                {{ form.dlt_domain_id.label(class="col-sm-2 col-form-label") }}
+                {% if form.dlt_domain_id.errors %}
+                    {{ form.dlt_domain_id(class="form-control is-invalid") }}
+                    <div class="invalid-feedback">
+                        {% for error in form.dlt_domain_id.errors %}{{ error }}{% endfor %}
+                    </div>
+                {% else %}
+                    {{ form.dlt_domain_id(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            <div class="col-sm-12">
+                Status:
+                <br />
+                {{ form.infinite_loop }} {{ form.infinite_loop.label(class="col-sm-3 col-form-label") }}
+                {{ form.running }} {{ form.running.label(class="col-sm-3 col-form-label") }}
+            </div>
+        </div>
+        <br />
+
+        <div class="row mb-3">
+            {{ form.submit(class="btn btn-primary") }}
+        </div>
+    </form>
+{% endblock %}
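One detail of the template above (and of set_properties() in load_gen/routes.py): text and number inputs are frozen with readonly, while checkboxes are frozen with disabled. The distinction is deliberate HTML-form behaviour: readonly fields are still submitted with the form, disabled ones are not, and checkboxes simply do not support readonly. A minimal WTForms illustration (hypothetical form, not part of this patch):

from flask_wtf import FlaskForm
from wtforms import BooleanField, IntegerField

class FrozenFieldsExample(FlaskForm):
    # readonly: rendered non-editable, but its value is still POSTed back.
    num_requests = IntegerField('Num Requests', render_kw={'readonly': True})
    # disabled: rendered inert and NOT submitted; fine for display-only flags.
    running = BooleanField('Running', render_kw={'disabled': True})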
diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html
index 11a868fdff9f5ee1bcbf22936ae0283d4ccc5715..eef42ae9a9f4cf386d26da0449681bab75f33b41 100644
--- a/src/webui/service/templates/main/debug.html
+++ b/src/webui/service/templates/main/debug.html
@@ -17,26 +17,12 @@
 {% extends 'base.html' %}
 
 {% block content %}
-    <h1>Debug</h1>
+    <h1>Debug API</h1>
 
-
-
-
-    <h3>Load Generator:</h3>
-    <a href="{{ url_for('load_gen.start') }}">Start</a>
-    <a href="{{ url_for('load_gen.stop') }}">Stop</a>
+
 {% endblock %}
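The detail templates below call round() and enumerate(), which are Python builtins, not Jinja2 builtins. They therefore presumably get registered as Jinja globals when the WebUI app is created; a sketch of that wiring (the actual registration would live in src/webui/service/__init__.py):

from flask import Flask

def create_app_sketch() -> Flask:
    app = Flask(__name__)
    # Without these globals, '{{ round(...) }}' and
    # '{% for i,x in enumerate(...) %}' fail to render with
    # "'round' is undefined" / "'enumerate' is undefined".
    app.jinja_env.globals.update({
        'enumerate': enumerate,
        'round'    : round,
    })
    return app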
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index b2160695173064b9863834a4d42c60a69cc913ba..bee2e93c53896a8eeac826703a60afe02a5aa825 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -36,7 +36,8 @@
+
@@ -87,7 +88,7 @@
                         <th scope="col">Kind</th>
-                        <th scope="col">Type</th>
+                        <th scope="col">Key/Type</th>
                         <th scope="col">Value</th>
@@ -135,15 +136,43 @@
                             {{ constraint.endpoint_priority.priority }}
                         </td>
                     </tr>
+                    {% elif constraint.WhichOneof('constraint')=='sla_capacity' %}
+                    <tr>
+                        <td>SLA Capacity</td>
+                        <td>-</td>
+                        <td>
+                            {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps
+                        </td>
+                    </tr>
+                    {% elif constraint.WhichOneof('constraint')=='sla_latency' %}
+                    <tr>
+                        <td>SLA E2E Latency</td>
+                        <td>-</td>
+                        <td>
+                            {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms
+                        </td>
+                    </tr>
                     {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
                     <tr>
                         <td>SLA Availability</td>
                         <td>-</td>
                         <td>
+                            {{ round(constraint.sla_availability.availability, ndigits=5) }} %;
                             {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                             {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
                         </td>
                     </tr>
+                    {% elif constraint.WhichOneof('constraint')=='sla_isolation' %}
+                    <tr>
+                        <td>SLA Isolation</td>
+                        <td>-</td>
+                        <td>
+                            {% for i,isolation_level in enumerate(constraint.sla_isolation.isolation_level) %}
+                            {% if i > 0 %}, {% endif %}
+                            {{ ile.Name(isolation_level) }}
+                            {% endfor %}
+                        </td>
+                    </tr>
                     {% else %}
-
@@ -185,34 +214,12 @@
                 {% endfor %}
-
-
-
-
+
                         <th scope="col">Connection Id</th>
-                        <th scope="col">Sub-service</th>
+                        <th scope="col">Sub-Service</th>
                         <th scope="col">Path</th>
@@ -258,8 +265,26 @@
+
+
-
-
-
-{% endblock %}
\ No newline at end of file
+{% endblock %}
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 390f882d7058b825ecf9d2bce5689585f99b80aa..8f223e44deda37b177a360a51b1e366f680fac27 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -32,14 +32,14 @@
     Update
-
-
 -->
 -->
+
-
@@ -88,7 +88,7 @@
                         <th scope="col">Kind</th>
-                        <th scope="col">Type</th>
+                        <th scope="col">Key/Type</th>
                         <th scope="col">Value</th>
@@ -136,15 +136,43 @@
                             {{ constraint.endpoint_priority.priority }}
                         </td>
                     </tr>
+                    {% elif constraint.WhichOneof('constraint')=='sla_capacity' %}
+                    <tr>
+                        <td>SLA Capacity</td>
+                        <td>-</td>
+                        <td>
+                            {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps
+                        </td>
+                    </tr>
+                    {% elif constraint.WhichOneof('constraint')=='sla_latency' %}
+                    <tr>
+                        <td>SLA E2E Latency</td>
+                        <td>-</td>
+                        <td>
+                            {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms
+                        </td>
+                    </tr>
                     {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
                     <tr>
                         <td>SLA Availability</td>
                         <td>-</td>
                         <td>
+                            {{ round(constraint.sla_availability.availability, ndigits=5) }} %;
                             {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                             {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
                         </td>
                     </tr>
+                    {% elif constraint.WhichOneof('constraint')=='sla_isolation' %}
+                    <tr>
+                        <td>SLA Isolation</td>
+                        <td>-</td>
+                        <td>
+                            {% for i,isolation_level in enumerate(constraint.sla_isolation.isolation_level) %}
+                            {% if i > 0 %}, {% endif %}
+                            {{ ile.Name(isolation_level) }}
+                            {% endfor %}
+                        </td>
+                    </tr>
                     {% else %}
-
@@ -191,7 +219,7 @@
                         <th scope="col">Service Id</th>
                         <th scope="col">Sub-Services</th>
-
+
@@ -219,7 +247,7 @@
-                        <th scope="col">Sub-slices</th>
+                        <th scope="col">Sub-Slices</th>
@@ -244,4 +272,27 @@
-{% endblock %}
\ No newline at end of file
+
+
+
+
+{% endblock %}
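Finally, the new load_gen blueprint (and the page it exposes at /load_gen/home) only becomes reachable once it is registered on the Flask app, presumably next to the other WebUI blueprints. A sketch, with import paths assumed rather than confirmed by this patch:

from flask import Flask

def register_load_gen_sketch(app : Flask) -> None:
    # Deferred import mirrors the usual Flask pattern to avoid import cycles.
    from webui.service.load_gen.routes import load_gen
    app.register_blueprint(load_gen)  # serves /load_gen/home, /load_gen/start, /load_gen/stop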