diff --git a/deploy/all.sh b/deploy/all.sh
index a99607f5b907c2bd1e1b4b889bef881874a63967..6f5592cb43a5f214b2536226bb857629ad0c3cf0 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -25,14 +25,14 @@
 # By default, assume internal MicroK8s registry is used.
 export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
-# If not already set, set the list of components you want to build images for, and deploy.
+# If not already set, set the space-separated list of components you want to build images for and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
 
-# If not already set, set the name of the Kubernetes namespace to deploy to.
+# If not already set, set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 
 # If not already set, set additional manifest files to be applied after the deployment
@@ -41,7 +41,7 @@ export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
 # If not already set, set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 
-# If not already set, disable skip-build flag.
+# If not already set, disable the skip-build flag so that the Docker images are rebuilt.
 # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
 export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
 
@@ -60,12 +60,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 # If not already set, set the database name to be used by Context.
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
-# If not already set, set the name of the secret where CockroachDB data and credentials will be stored.
-export CRDB_SECRET_NAME=${CRDB_SECRET_NAME:-"crdb-data"}
-
-# If not already set, set the namespace where the secret containing CockroachDB data and credentials will be stored.
-export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
-
 # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
 # "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
 # checking/deploying CockroachDB.
@@ -78,7 +72,7 @@ export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
 #   Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html
 export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
 
-# If not already set, disable flag for dropping database if exists.
+# If not already set, disable flag for dropping database, if it exists.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
 # If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed to by variable CRDB_DATABASE will be dropped while
 # checking/deploying CockroachDB.
@@ -96,12 +90,6 @@ export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""}
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
 
-# If not already set, set the name of the secret where NATS data and credentials will be stored.
-export NATS_SECRET_NAME=${NATS_SECRET_NAME:-"nats-data"}
-
-# If not already set, set the namespace where the secret containing NATS data and credentials will be stored.
-export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
-
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -113,20 +101,24 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
 # If not already set, set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
 
-# If not already set, set the database username to be used by Monitoring.
+# If not already set, set the database username to be used for QuestDB.
 export QDB_USERNAME=${QDB_USERNAME:-"admin"}
 
-# If not already set, set the database user's password to be used by Monitoring.
+# If not already set, set the database user's password to be used for QuestDB.
 export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
+
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
-## If not already set, disable flag for dropping table if exists.
-## WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while
-## checking/deploying QuestDB.
-#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""}
+# If not already set, disable flag for dropping tables if they exist.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION!
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed to by variables
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while
+# checking/deploying QuestDB.
+export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
 # If not already set, disable flag for re-deploying QuestDB from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index 98d011f190196b803be27200b8bc348b30c87055..4e8cfe2c399fb0e943c90e5c585f93f0707ca835 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -66,9 +66,6 @@ CRDB_MANIFESTS_PATH="manifests/cockroachdb"
 # Create a tmp folder for files modified during the deployment
 TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
 mkdir -p $TMP_MANIFESTS_FOLDER
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
-mkdir -p $TMP_LOGS_FOLDER
-CRDB_LOG_FILE="$TMP_LOGS_FOLDER/crdb_deploy.log"
 
 function crdb_deploy_single() {
     echo "CockroachDB Namespace"
diff --git a/deploy/nats.sh b/deploy/nats.sh
index 115a185302236b80db385212cd772100392329af..9edbc7765a09135d62a6021c5f2b0669e36a69a4 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -31,14 +31,6 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
 # Automated steps start here
 ########################################################################################################################
 
-# Constants
-TMP_FOLDER="./tmp"
-NATS_MANIFESTS_PATH="manifests/nats"
-
-# Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
-mkdir -p $TMP_MANIFESTS_FOLDER
-
 function nats_deploy_single() {
     echo "NATS Namespace"
     echo ">>> Create NATS Namespace (if missing)"
diff --git a/deploy/qdb.sh b/deploy/qdb.sh
index d9a4de353b3309ef0a8a34310089e9bff31589fa..d94c000bf8d40c72faa255e7c6554926b6f683d3 100755
--- a/deploy/qdb.sh
+++ b/deploy/qdb.sh
@@ -21,20 +21,24 @@
 # If not already set, set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
 
-# If not already set, set the database username to be used by Monitoring.
+# If not already set, set the database username to be used for QuestDB.
 export QDB_USERNAME=${QDB_USERNAME:-"admin"}
 
-# If not already set, set the database user's password to be used by Monitoring.
+# If not already set, set the database user's password to be used for QuestDB.
 export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
 
-## If not already set, disable flag for dropping table if exists.
-## WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while
-## checking/deploying QuestDB.
-#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""}
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
+
+# If not already set, disable flag for dropping tables if they exist.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION!
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed to by variables
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped
+# while checking/deploying QuestDB.
+export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
 # If not already set, disable flag for re-deploying QuestDB from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
@@ -52,9 +56,6 @@ QDB_MANIFESTS_PATH="manifests/questdb"
 
 # Create a tmp folder for files modified during the deployment
 TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
-QDB_LOG_FILE="$TMP_LOGS_FOLDER/qdb_deploy.log"
-mkdir -p $TMP_LOGS_FOLDER
 
 function qdb_deploy() {
     echo "QuestDB Namespace"
@@ -147,19 +148,20 @@ function qdb_undeploy() {
     echo
 }
 
-# TODO: implement method to drop table
-#function qdb_drop_table() {
-#    echo "Drop table if exists"
-#    QDB_CLIENT_URL="postgresql://${QDB_USERNAME}:${QDB_PASSWORD}@questdb-0:${QDB_SQL_PORT}/defaultdb?sslmode=require"
-#    kubectl exec -it --namespace ${QDB_NAMESPACE} questdb-0 -- \
-#        ./qdb sql --certs-dir=/qdb/qdb-certs --url=${QDB_CLIENT_URL} \
-#        --execute "DROP TABLE IF EXISTS ${QDB_TABLE};"
-#    echo
-#}
+function qdb_drop_tables() {
+    QDB_HOST=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.clusterIP}')
+    QDB_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+
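+    # QuestDB's REST API exposes an /exec endpoint that runs the SQL statement
+    # passed in the 'query' parameter ('+' encodes the spaces in the URL).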
+    echo "Drop tables, if exist"
+    curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;"
+    echo
+    curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;"
+    echo
+}
 
 if [ "$QDB_REDEPLOY" == "YES" ]; then
     qdb_undeploy
-#elif [ "$QDB_DROP_TABLE_IF_EXISTS" == "YES" ]; then
-#    qdb_drop_table
+elif [ "$QDB_DROP_TABLES_IF_EXIST" == "YES" ]; then
+    qdb_drop_tables
 fi
 qdb_deploy
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index b9bcbab4d8084e30aae90be3cf669445d01c0dac..16cf5c13bd4532aac0267b7904c6c403d7ac057c 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -18,18 +18,21 @@
 # Read deployment settings
 ########################################################################################################################
 
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
 # If not already set, set the URL of the Docker registry where the images will be uploaded to.
 # By default, assume internal MicroK8s registry is used.
 export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
-# If not already set, set the list of components you want to build images for, and deploy.
+# If not already set, set the space-separated list of components you want to build images for and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
 
-# If not already set, set the name of the Kubernetes namespace to deploy to.
+# If not already set, set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 
 # If not already set, set additional manifest files to be applied after the deployment
@@ -38,10 +41,13 @@ export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
 # If not already set, set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 
-# If not already set, disable skip-build flag.
+# If not already set, disable the skip-build flag so that the Docker images are rebuilt.
 # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
 export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
 
+
+# ----- CockroachDB ------------------------------------------------------------
+
 # If not already set, set the namespace where CockroachDB will be deployed.
 export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
 
@@ -54,20 +60,29 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 # If not already set, set the database name to be used by Context.
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
+
+# ----- NATS -------------------------------------------------------------------
+
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
 
+
+# ----- QuestDB ----------------------------------------------------------------
+
 # If not already set, set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
 
-# If not already set, set the database username to be used by Monitoring.
+# If not already set, set the database username to be used for QuestDB.
 export QDB_USERNAME=${QDB_USERNAME:-"admin"}
 
-# If not already set, set the database user's password to be used by Monitoring.
+# If not already set, set the database user's password to be used for QuestDB.
 export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"}
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
+
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
 
 ########################################################################################################################
@@ -85,7 +100,7 @@ TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
 mkdir -p $TMP_LOGS_FOLDER
 
 echo "Deleting and Creating a new namespace..."
-kubectl delete namespace $TFS_K8S_NAMESPACE
+kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
 kubectl create namespace $TFS_K8S_NAMESPACE
 printf "\n"
 
@@ -118,7 +133,8 @@ kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='
     --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \
     --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \
     --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \
-    --from-literal=METRICSDB_TABLE=${QDB_TABLE} \
+    --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \
+    --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \
     --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \
     --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD}
 printf "\n"
@@ -301,28 +317,34 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     # Configure Grafana Admin Password
     # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
     GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}"
-    echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
+
+    echo ">> Updating Grafana 'admin' password..."
     curl -X PUT -H "Content-Type: application/json" -d '{
         "oldPassword": "'${GRAFANA_PASSWORD}'",
         "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
         "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
     }' ${GRAFANA_URL_DEFAULT}/api/user/password
     echo
+    echo
 
     # Updated Grafana API URL
     GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}"
     echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
 
+    echo ">> Installing Scatter Plot plugin..."
+    curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \
+        ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install
+    echo
+
     # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
-    # TODO: replace user, password and database by variables to be saved
     QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}"
-    echo "Creating a datasource..."
+    echo ">> Creating datasources..."
     curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
         "access"   : "proxy",
         "type"     : "postgres",
-        "name"     : "questdb",
+        "name"     : "questdb-mon-kpi",
         "url"      : "'${QDB_HOST_PORT}'",
-        "database" : "'${QDB_TABLE}'",
+        "database" : "'${QDB_TABLE_MONITORING_KPIS}'",
         "user"     : "'${QDB_USERNAME}'",
         "basicAuth": false,
         "isDefault": true,
@@ -342,16 +364,51 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     }' ${GRAFANA_URL_UPDATED}/api/datasources
     echo
 
-    # Create Monitoring Dashboard
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-slc-grp",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_SLICE_GROUPS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
+
+    echo ">> Creating dashboards..."
     # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
-    curl -X POST -H "Content-Type: application/json" \
-        -d '@src/webui/grafana_dashboard_psql.json' \
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
         ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    printf "\n\n"
+
+    echo ">> Staring dashboards..."
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
 
-    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tf-l3-monit"
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
     DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
 
     printf "\n\n"
 fi
diff --git a/hackfest/mock_osm/__main__.py b/hackfest/mock_osm/__main__.py
index 669da2b5e6a1729f35d2958f2d7aa68c0413287d..4ed25eaedbf4eba1f04ea41c72a751ecd7d6380b 100644
--- a/hackfest/mock_osm/__main__.py
+++ b/hackfest/mock_osm/__main__.py
@@ -58,13 +58,11 @@ SERVICE_CONNECTION_POINTS = [
 class MockOSMShell(cmd.Cmd):
     intro = 'Welcome to the MockOSM shell.\nType help or ? to list commands.\n'
     prompt = '(mock-osm) '
-    file = None
 
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, **kwargs)
         self.mock_osm = MockOSM(WIM_URL, WIM_PORT_MAPPING, WIM_USERNAME, WIM_PASSWORD)
 
-    # ----- basic turtle commands -----
     def do_create(self, arg):
         'Create an ELINE (L2) service'
         service_uuid = self.mock_osm.create_connectivity_service(
diff --git a/hackfest/tfs-descriptors/old/service.json b/hackfest/tfs-descriptors/old/service.json
index a25d0171dbfdbf174a877151201752c76759514a..26804dcf133fa6c83be70a72374b0f19435d24d6 100644
--- a/hackfest/tfs-descriptors/old/service.json
+++ b/hackfest/tfs-descriptors/old/service.json
@@ -18,8 +18,8 @@
                 {"device_id":{"device_uuid":{"uuid":"R2"}},"endpoint_uuid":{"uuid":"1/3"}}
             ],
             "service_constraints":[
-                {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}},
-                {"custom": {"constraint_type": "latency[ms]", "constraint_value": "20.0"}}
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 20.0}}
             ],
             "service_config":{"config_rules":[]}
         }
diff --git a/hackfest/tfs-descriptors/service-l3vpn.json b/hackfest/tfs-descriptors/service-l3vpn.json
index 457ba1a509aebc5eaea8caa37a09ac62ef286f32..723453b8b3d43a56386e15dec6f70fc368bca517 100644
--- a/hackfest/tfs-descriptors/service-l3vpn.json
+++ b/hackfest/tfs-descriptors/service-l3vpn.json
@@ -12,8 +12,8 @@
                 {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "1/2"}}
             ],
             "service_constraints": [
-                {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}},
-                {"custom": {"constraint_type": "latency[ms]", "constraint_value": "15.2"}}
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 15.2}}
             ],
             "service_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 447f6a1c77cc6862db3df3e83b73add3257a5c0d..49e2b5943d20586941f80e8fc4b5c32c99d70f8e 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -37,6 +37,11 @@ spec:
         env:
         - name: LOG_LEVEL
           value: "INFO"
+        - name: SLICE_GROUPING
+          value: "DISABLE"
+        envFrom:
+        - secretRef:
+            name: qdb-data
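+        # Note: the qdb-data secret provides the QuestDB connection settings the
+        # Slice component uses to store slice groups when SLICE_GROUPING is enabled.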
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:4040"]
diff --git a/my_deploy.sh b/my_deploy.sh
index 6f0e64afe311b8e56446caabfac6329024c207a9..518b90f280a0d885169e00ce2fc728ca01f4635a 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -56,7 +56,7 @@ export CRDB_DATABASE="tfs"
 # See ./deploy/all.sh or ./deploy/crdb.sh for additional details
 export CRDB_DEPLOY_MODE="single"
 
-# Disable flag for dropping database, if exists.
+# Disable flag for dropping database, if it exists.
 export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
@@ -74,20 +74,23 @@ export NATS_REDEPLOY=""
 
 # ----- QuestDB ----------------------------------------------------------------
 
-# If not already set, set the namespace where QuestDB will be deployed.
+# Set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE="qdb"
 
-# If not already set, set the database username to be used by Monitoring.
+# Set the database username to be used for QuestDB.
 export QDB_USERNAME="admin"
 
-# If not already set, set the database user's password to be used by Monitoring.
+# Set the database user's password to be used for QuestDB.
 export QDB_PASSWORD="quest"
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE="tfs_monitoring"
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
 
-## If not already set, disable flag for dropping table if exists.
-#export QDB_DROP_TABLE_IF_EXISTS=""
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
 
-# If not already set, disable flag for re-deploying QuestDB from scratch.
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
diff --git a/proto/context.proto b/proto/context.proto
index e403c4a22f2df62f695041c094cc1c6e6a193d5f..49d16229cdac5de84f25cfaa7d196d25184f46f0 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -509,6 +509,7 @@ message Constraint_SLA_Capacity {
 message Constraint_SLA_Availability {
   uint32 num_disjoint_paths = 1;
   bool all_active = 2;
+  float availability = 3; // 0.0 .. 100.0 percentage of availability
 }
 
 enum IsolationLevelEnum {
diff --git a/proto/load_generator.proto b/proto/load_generator.proto
index 98f6eefda88db7abac4651857326952789a879ba..86f9469588f1586da5339edad198e39e82598cde 100644
--- a/proto/load_generator.proto
+++ b/proto/load_generator.proto
@@ -18,6 +18,36 @@ package load_generator;
 import "context.proto";
 
 service LoadGeneratorService {
-  rpc Start(context.Empty) returns (context.Empty) {}
-  rpc Stop (context.Empty) returns (context.Empty) {}
+  rpc Start    (Parameters   ) returns (context.Empty) {}
+  rpc GetStatus(context.Empty) returns (Status       ) {}
+  rpc Stop     (context.Empty) returns (context.Empty) {}
+}
+
+enum RequestTypeEnum {
+  REQUESTTYPE_UNDEFINED    = 0;
+  REQUESTTYPE_SERVICE_L2NM = 1;
+  REQUESTTYPE_SERVICE_L3NM = 2;
+  REQUESTTYPE_SERVICE_MW   = 3;
+  REQUESTTYPE_SERVICE_TAPI = 4;
+  REQUESTTYPE_SLICE_L2NM   = 5;
+  REQUESTTYPE_SLICE_L3NM   = 6;
+}
+
+message Parameters {
+  uint64 num_requests = 1;  // if == 0, generate infinite requests
+  repeated RequestTypeEnum request_types = 2;
+  float offered_load = 3;
+  float holding_time = 4;
+  float inter_arrival_time = 5;
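+  // Note: offered_load, holding_time and inter_arrival_time are related (offered
+  // load is typically holding_time / inter_arrival_time), so setting two of them
+  // is normally enough to derive the third.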
+  bool do_teardown = 6;
+  bool dry_mode = 7;
+  bool record_to_dlt = 8;
+  string dlt_domain_id = 9;
+}
+
+message Status {
+  Parameters parameters = 1;
+  uint64 num_generated = 2;
+  bool infinite_loop = 3;
+  bool running = 4;
 }
diff --git a/scripts/show_logs_load_generator.sh b/scripts/show_logs_load_generator.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d0f2527d74840d48a10e0ec7ba018f513eea2c52
--- /dev/null
+++ b/scripts/show_logs_load_generator.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/load-generatorservice
diff --git a/src/common/tests/LoadScenario.py b/src/common/tests/LoadScenario.py
deleted file mode 100644
index 93cf3708cfc5f8a4296a5cb68772984beefd7563..0000000000000000000000000000000000000000
--- a/src/common/tests/LoadScenario.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-from service.client.ServiceClient import ServiceClient
-from slice.client.SliceClient import SliceClient
-
-LOGGER = logging.getLogger(__name__)
-LOGGERS = {
-    'success': LOGGER.info,
-    'danger' : LOGGER.error,
-    'error'  : LOGGER.error,
-}
-
-def load_scenario_from_descriptor(
-    descriptor_file : str, context_client : ContextClient, device_client : DeviceClient,
-    service_client : ServiceClient, slice_client : SliceClient
-) -> DescriptorLoader:
-    with open(descriptor_file, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(
-        descriptors,
-        context_client=context_client, device_client=device_client,
-        service_client=service_client, slice_client=slice_client)
-    results = descriptor_loader.process()
-
-    num_errors = 0
-    for message,level in compose_notifications(results):
-        LOGGERS.get(level)(message)
-        if level != 'success': num_errors += 1
-    if num_errors > 0:
-        MSG = 'Failed to load descriptors in file {:s}'
-        raise Exception(MSG.format(str(descriptor_file)))
-
-    return descriptor_loader
\ No newline at end of file
diff --git a/src/common/tools/context_queries/Context.py b/src/common/tools/context_queries/Context.py
index d28ca3991fe7de0cdf9d069db413ff528ace4335..a627b9ba5828d31caca8332d7241d28e126895d3 100644
--- a/src/common/tools/context_queries/Context.py
+++ b/src/common/tools/context_queries/Context.py
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.proto.context_pb2 import Context, Empty
+import grpc
+from typing import Optional
+from common.proto.context_pb2 import Context, ContextId, Empty
 from common.tools.object_factory.Context import json_context
 from context.client.ContextClient import ContextClient
 
@@ -23,3 +25,17 @@ def create_context(
     existing_context_uuids = {context_id.context_uuid.uuid for context_id in existing_context_ids.context_ids}
     if context_uuid in existing_context_uuids: return
     context_client.SetContext(Context(**json_context(context_uuid)))
+
+def get_context(context_client : ContextClient, context_uuid : str, rw_copy : bool = False) -> Optional[Context]:
+    try:
+        # pylint: disable=no-member
+        context_id = ContextId()
+        context_id.context_uuid.uuid = context_uuid
+        ro_context = context_client.GetContext(context_id)
+        if not rw_copy: return ro_context
+        rw_context = Context()
+        rw_context.CopyFrom(ro_context)
+        return rw_context
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get Context({:s})'.format(str(context_uuid)))
+        return None
diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py
index 7317cc793f5dd46e6a9f741bf259635a5bd0462f..edb640708b17b6734fbde6d759db5a2cdea692ec 100644
--- a/src/common/tools/context_queries/InterDomain.py
+++ b/src/common/tools/context_queries/InterDomain.py
@@ -136,13 +136,11 @@ def compute_interdomain_path(
         service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add()
         service_endpoint_id.CopyFrom(endpoint_id)
     
-    constraint_bw = pathcomp_req_svc.service_constraints.add()
-    constraint_bw.custom.constraint_type = 'bandwidth[gbps]'
-    constraint_bw.custom.constraint_value = '10.0'
+    constraint_sla_capacity = pathcomp_req_svc.service_constraints.add()
+    constraint_sla_capacity.sla_capacity.capacity_gbps = 10.0
 
-    constraint_lat = pathcomp_req_svc.service_constraints.add()
-    constraint_lat.custom.constraint_type = 'latency[ms]'
-    constraint_lat.custom.constraint_value = '100.0'
+    constraint_sla_latency = pathcomp_req_svc.service_constraints.add()
+    constraint_sla_latency.sla_latency.e2e_latency_ms = 100.0
 
     LOGGER.debug('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req)))
     pathcomp_rep = pathcomp_client.Compute(pathcomp_req)
diff --git a/src/common/tools/context_queries/Link.py b/src/common/tools/context_queries/Link.py
index 83a878bde85ddfe25bc345ed987670164bacf2c6..291cdcf375d942b72008daea5c2c5ff357a994ef 100644
--- a/src/common/tools/context_queries/Link.py
+++ b/src/common/tools/context_queries/Link.py
@@ -12,11 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Set
-from common.proto.context_pb2 import ContextId, Empty, Link, Topology, TopologyId
+import grpc
+from typing import List, Optional, Set
+from common.proto.context_pb2 import ContextId, Empty, Link, LinkId, Topology, TopologyId
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 
+def get_link(context_client : ContextClient, link_uuid : str, rw_copy : bool = False) -> Optional[Link]:
+    try:
+        # pylint: disable=no-member
+        link_id = LinkId()
+        link_id.link_uuid.uuid = link_uuid
+        ro_link = context_client.GetLink(link_id)
+        if not rw_copy: return ro_link
+        rw_link = Link()
+        rw_link.CopyFrom(ro_link)
+        return rw_link
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get Link({:s})'.format(str(link_uuid)))
+        return None
+
 def get_existing_link_uuids(context_client : ContextClient) -> Set[str]:
     existing_link_ids = context_client.ListLinkIds(Empty())
     existing_link_uuids = {link_id.link_uuid.uuid for link_id in existing_link_ids.link_ids}
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index 5972d425be5298ec7fcb63bd28b50f3643363ae4..0e1d8c7371e87b47bfc47a4242e00039add48e7f 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -15,25 +15,30 @@
 # SDN controller descriptor loader
 
 # Usage example (WebUI):
 #    descriptors = json.loads(descriptors_data_from_client)
-#    descriptor_loader = DescriptorLoader(descriptors)
+#    descriptor_loader = DescriptorLoader(
+#       descriptors=descriptors, num_workers=10,
+#       context_client=..., device_client=..., service_client=..., slice_client=...)
 #    results = descriptor_loader.process()
 #    for message,level in compose_notifications(results):
 #        flash(message, level)
 
 # Usage example (pytest):
-#    with open('path/to/descriptor.json', 'r', encoding='UTF-8') as f:
-#        descriptors = json.loads(f.read())
 #    descriptor_loader = DescriptorLoader(
-#       descriptors, context_client=..., device_client=..., service_client=..., slice_client=...)
+#       descriptors_file='path/to/descriptor.json', num_workers=10,
+#       context_client=..., device_client=..., service_client=..., slice_client=...)
 #    results = descriptor_loader.process()
-#    loggers = {'success': LOGGER.info, 'danger': LOGGER.error, 'error': LOGGER.error}
-#    for message,level in compose_notifications(results):
-#        loggers.get(level)(message)
+#    check_descriptor_load_results(results, descriptor_loader)
+#    descriptor_loader.validate()
+#    # do test ...
+#    descriptor_loader.unload()
 
 import concurrent.futures, json, logging, operator
 from typing import Any, Dict, List, Optional, Tuple, Union
-from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology
+from common.proto.context_pb2 import (
+    Connection, Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Service, ServiceId, Slice, SliceId,
+    Topology, TopologyId)
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
@@ -44,6 +49,11 @@ from .Tools import (
     get_descriptors_add_topologies, split_devices_by_rules)
 
 LOGGER = logging.getLogger(__name__)
+LOGGERS = {
+    'success': LOGGER.info,
+    'danger' : LOGGER.error,
+    'error'  : LOGGER.error,
+}
 
 ENTITY_TO_TEXT = {
     # name   => singular,    plural
@@ -67,25 +77,26 @@ TypeResults = List[Tuple[str, str, int, List[str]]] # entity_name, action, num_o
 TypeNotification = Tuple[str, str] # message, level
 TypeNotificationList = List[TypeNotification]
 
-def compose_notifications(results : TypeResults) -> TypeNotificationList:
-    notifications = []
-    for entity_name, action_name, num_ok, error_list in results:
-        entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name]
-        action_infinitive, action_past = ACTION_TO_TEXT[action_name]
-        num_err = len(error_list)
-        for error in error_list:
-            notifications.append((f'Unable to {action_infinitive} {entity_name_singluar} {error}', 'error'))
-        if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success'))
-        if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger'))
-    return notifications
-
 class DescriptorLoader:
     def __init__(
-        self, descriptors : Union[str, Dict], num_workers : int = 1,
+        self, descriptors : Optional[Union[str, Dict]] = None, descriptors_file : Optional[str] = None,
+        num_workers : int = 1,
         context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None,
         service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None
     ) -> None:
-        self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors
+        if (descriptors is None) == (descriptors_file is None):
+            raise Exception('Exactly one of "descriptors" or "descriptors_file" is required')
+
+        if descriptors_file is not None:
+            with open(descriptors_file, 'r', encoding='UTF-8') as f:
+                self.__descriptors = json.loads(f.read())
+            self.__descriptor_file_path = descriptors_file
+        else: # descriptors is not None
+            self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors
+            self.__descriptor_file_path = '<dict>'
+
+        self.__num_workers = num_workers
+
         self.__dummy_mode  = self.__descriptors.get('dummy_mode' , False)
         self.__contexts    = self.__descriptors.get('contexts'   , [])
         self.__topologies  = self.__descriptors.get('topologies' , [])
@@ -95,8 +106,6 @@ class DescriptorLoader:
         self.__slices      = self.__descriptors.get('slices'     , [])
         self.__connections = self.__descriptors.get('connections', [])
 
-        self.__num_workers = num_workers
-
         self.__contexts_add   = None
         self.__topologies_add = None
         self.__devices_add    = None
@@ -111,6 +120,24 @@ class DescriptorLoader:
 
         self.__results : TypeResults = list()
 
+    @property
+    def descriptor_file_path(self) -> Optional[str]: return self.__descriptor_file_path
+
+    @property
+    def num_workers(self) -> int: return self.__num_workers
+
+    @property
+    def context_client(self) -> Optional[ContextClient]: return self.__ctx_cli
+
+    @property
+    def device_client(self) -> Optional[DeviceClient]: return self.__dev_cli
+
+    @property
+    def service_client(self) -> Optional[ServiceClient]: return self.__svc_cli
+
+    @property
+    def slice_client(self) -> Optional[SliceClient]: return self.__slc_cli
+
     @property
     def contexts(self) -> List[Dict]: return self.__contexts
 
@@ -269,3 +296,85 @@ class DescriptorLoader:
 
         error_list = [str_error for _,str_error in sorted(error_list, key=operator.itemgetter(0))]
         self.__results.append((entity_name, action_name, num_ok, error_list))
+
+    def validate(self) -> None:
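+        # Check that the entities stored in Context match the counts declared in the descriptors.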
+        self.__ctx_cli.connect()
+
+        contexts = self.__ctx_cli.ListContexts(Empty())
+        assert len(contexts.contexts) == self.num_contexts
+
+        for context_uuid, num_topologies in self.num_topologies.items():
+            response = self.__ctx_cli.ListTopologies(ContextId(**json_context_id(context_uuid)))
+            assert len(response.topologies) == num_topologies
+
+        response = self.__ctx_cli.ListDevices(Empty())
+        assert len(response.devices) == self.num_devices
+
+        response = self.__ctx_cli.ListLinks(Empty())
+        assert len(response.links) == self.num_links
+
+        for context_uuid, num_services in self.num_services.items():
+            response = self.__ctx_cli.ListServices(ContextId(**json_context_id(context_uuid)))
+            assert len(response.services) == num_services
+
+        for context_uuid, num_slices in self.num_slices.items():
+            response = self.__ctx_cli.ListSlices(ContextId(**json_context_id(context_uuid)))
+            assert len(response.slices) == num_slices
+
+    def unload(self) -> None:
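+        # Delete entities in reverse dependency order:
+        # slices, services, links, devices, topologies, contexts.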
+        self.__ctx_cli.connect()
+        self.__dev_cli.connect()
+        self.__svc_cli.connect()
+        self.__slc_cli.connect()
+
+        for _, slice_list in self.slices.items():
+            for slice_ in slice_list:
+                self.__slc_cli.DeleteSlice(SliceId(**slice_['slice_id']))
+
+        for _, service_list in self.services.items():
+            for service in service_list:
+                self.__svc_cli.DeleteService(ServiceId(**service['service_id']))
+
+        for link in self.links:
+            self.__ctx_cli.RemoveLink(LinkId(**link['link_id']))
+
+        for device in self.devices:
+            self.__dev_cli.DeleteDevice(DeviceId(**device['device_id']))
+
+        for _, topology_list in self.topologies.items():
+            for topology in topology_list:
+                self.__ctx_cli.RemoveTopology(TopologyId(**topology['topology_id']))
+
+        for context in self.contexts:
+            self.__ctx_cli.RemoveContext(ContextId(**context['context_id']))
+
+def compose_notifications(results : TypeResults) -> TypeNotificationList:
+    notifications = []
+    for entity_name, action_name, num_ok, error_list in results:
+        entity_name_singular, entity_name_plural = ENTITY_TO_TEXT[entity_name]
+        action_infinitive, action_past = ACTION_TO_TEXT[action_name]
+        num_err = len(error_list)
+        for error in error_list:
+            notifications.append((f'Unable to {action_infinitive} {entity_name_singular} {error}', 'error'))
+        if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success'))
+        if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger'))
+    return notifications
+
+def check_descriptor_load_results(results : TypeResults, descriptor_loader : DescriptorLoader) -> None:
+    num_errors = 0
+    for message,level in compose_notifications(results):
+        LOGGERS.get(level)(message)
+        if level != 'success': num_errors += 1
+    if num_errors > 0:
+        MSG = 'Failed to load descriptors from "{:s}"'
+        raise Exception(MSG.format(str(descriptor_loader.descriptor_file_path)))
+
+def validate_empty_scenario(context_client : ContextClient) -> None:
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 0
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 0
diff --git a/src/common/tools/descriptor/Tools.py b/src/common/tools/descriptor/Tools.py
index 9d6275748e6e35eaf240f80f100e993334d4c5ea..f03c635b802e5c003a6ea80af46ef740b97e500b 100644
--- a/src/common/tools/descriptor/Tools.py
+++ b/src/common/tools/descriptor/Tools.py
@@ -72,7 +72,7 @@ def format_service_custom_config_rules(service : Dict) -> Dict:
     return service
 
 def format_slice_custom_config_rules(slice_ : Dict) -> Dict:
-    config_rules = slice_.get('service_config', {}).get('config_rules', [])
+    config_rules = slice_.get('slice_config', {}).get('config_rules', [])
     config_rules = format_custom_config_rules(config_rules)
     slice_['slice_config']['config_rules'] = config_rules
     return slice_
diff --git a/src/common/tools/grpc/Constraints.py b/src/common/tools/grpc/Constraints.py
index 53f7dfd9822eb3a2efd48bf1b628547339a3ca69..07f0b7782dbd93479774af6324683753f906c5a1 100644
--- a/src/common/tools/grpc/Constraints.py
+++ b/src/common/tools/grpc/Constraints.py
@@ -17,7 +17,7 @@
 
 
 import json
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple
 from common.proto.context_pb2 import Constraint, EndPointId
 from common.tools.grpc.Tools import grpc_message_to_json_string
 
@@ -137,7 +137,31 @@ def update_constraint_endpoint_priority(constraints, endpoint_id : EndPointId, p
     constraint.endpoint_priority.priority = priority
     return constraint
 
-def update_constraint_sla_availability(constraints, num_disjoint_paths : int, all_active : bool) -> Constraint:
+def update_constraint_sla_capacity(constraints, capacity_gbps : float) -> Constraint:
+    for constraint in constraints:
+        if constraint.WhichOneof('constraint') != 'sla_capacity': continue
+        break   # found, end loop
+    else:
+        # not found, add it
+        constraint = constraints.add()      # pylint: disable=no-member
+
+    constraint.sla_capacity.capacity_gbps = capacity_gbps
+    return constraint
+
+def update_constraint_sla_latency(constraints, e2e_latency_ms : float) -> Constraint:
+    for constraint in constraints:
+        if constraint.WhichOneof('constraint') != 'sla_latency': continue
+        break   # found, end loop
+    else:
+        # not found, add it
+        constraint = constraints.add()      # pylint: disable=no-member
+
+    constraint.sla_latency.e2e_latency_ms = e2e_latency_ms
+    return constraint
+
+def update_constraint_sla_availability(
+    constraints, num_disjoint_paths : int, all_active : bool, availability : float
+) -> Constraint:
     for constraint in constraints:
         if constraint.WhichOneof('constraint') != 'sla_availability': continue
         break   # found, end loop
@@ -147,8 +171,21 @@ def update_constraint_sla_availability(constraints, num_disjoint_paths : int, al
 
     constraint.sla_availability.num_disjoint_paths = num_disjoint_paths
     constraint.sla_availability.all_active = all_active
+    constraint.sla_availability.availability = availability
     return constraint
 
+def update_constraint_sla_isolation(constraints, isolation_levels : List[int]) -> Constraint:
+    for constraint in constraints:
+        if constraint.WhichOneof('constraint') != 'sla_isolation': continue
+        break   # found, end loop
+    else:
+        # not found, add it
+        constraint = constraints.add()      # pylint: disable=no-member
+
+    for isolation_level in isolation_levels:
+        if isolation_level in constraint.sla_isolation.isolation_level: continue
+        constraint.sla_isolation.isolation_level.append(isolation_level)
+    return constraint
 
 def copy_constraints(source_constraints, target_constraints):
     for source_constraint in source_constraints:
@@ -189,11 +226,27 @@ def copy_constraints(source_constraints, target_constraints):
             priority = source_constraint.endpoint_priority.priority
             update_constraint_endpoint_priority(target_constraints, endpoint_id, priority)
 
+        elif constraint_kind == 'sla_capacity':
+            sla_capacity = source_constraint.sla_capacity
+            capacity_gbps = sla_capacity.capacity_gbps
+            update_constraint_sla_capacity(target_constraints, capacity_gbps)
+
+        elif constraint_kind == 'sla_latency':
+            sla_latency = source_constraint.sla_latency
+            e2e_latency_ms = sla_latency.e2e_latency_ms
+            update_constraint_sla_latency(target_constraints, e2e_latency_ms)
+
         elif constraint_kind == 'sla_availability':
             sla_availability = source_constraint.sla_availability
             num_disjoint_paths = sla_availability.num_disjoint_paths
             all_active = sla_availability.all_active
-            update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active)
+            availability = sla_availability.availability
+            update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active, availability)
+
+        elif constraint_kind == 'sla_isolation':
+            sla_isolation = source_constraint.sla_isolation
+            isolation_levels = sla_isolation.isolation_level
+            update_constraint_sla_isolation(target_constraints, isolation_levels)
 
         else:
             raise NotImplementedError('Constraint({:s})'.format(grpc_message_to_json_string(source_constraint)))
diff --git a/src/common/tools/mutex_queues/MutexQueues.py b/src/common/tools/mutex_queues/MutexQueues.py
index b9fc567d561287ed5d92f51a3cab0f92d58d88ed..96e22a86f012cb8326c380a0ebbf0c1b40cae21c 100644
--- a/src/common/tools/mutex_queues/MutexQueues.py
+++ b/src/common/tools/mutex_queues/MutexQueues.py
@@ -35,7 +35,7 @@
 #           self.mutex_queues.signal_done(device_uuid)
 
 import threading
-from queue import Queue
+from queue import Queue, Empty
 from typing import Dict
 
 class MutexQueues:
@@ -67,8 +67,11 @@ class MutexQueues:
         with self.lock:
             queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
             
-            # remove muself from the queue
-            queue.get_nowait()
+            # remove myself from the queue
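+            # (block briefly and tolerate an already-empty queue instead of
+            # failing with queue.Empty)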
+            try:
+                queue.get(block=True, timeout=0.1)
+            except Empty:
+                pass
 
             # if there are no other tasks queued, return
             if queue.qsize() == 0: return
diff --git a/src/common/tools/object_factory/Constraint.py b/src/common/tools/object_factory/Constraint.py
index e3c5129fd5bda5fb4a6659fc39b208bbdf0bb40f..ef00e3872343196f0a9f8de97d3b1ab6fc12d847 100644
--- a/src/common/tools/object_factory/Constraint.py
+++ b/src/common/tools/object_factory/Constraint.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import json
-from typing import Any, Dict, Union
+from typing import Any, Dict, List, Union
 
 def json_constraint_custom(constraint_type : str, constraint_value : Union[str, Dict[str, Any]]) -> Dict:
     if not isinstance(constraint_value, str): constraint_value = json.dumps(constraint_value, sort_keys=True)
@@ -29,5 +29,16 @@ def json_constraint_endpoint_location_gps(endpoint_id : Dict, latitude : float,
 def json_constraint_endpoint_priority(endpoint_id : Dict, priority : int) -> Dict:
     return {'endpoint_priority': {'endpoint_id': endpoint_id, 'priority': priority}}
 
-def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool) -> Dict:
-    return {'sla_availability': {'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active}}
+def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool, availability : float) -> Dict:
+    return {'sla_availability': {
+        'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active, 'availability': availability
+    }}
+
+def json_constraint_sla_capacity(capacity_gbps : float) -> Dict:
+    return {'sla_capacity': {'capacity_gbps': capacity_gbps}}
+
+def json_constraint_sla_isolation(isolation_levels : List[int]) -> Dict:
+    return {'sla_isolation': {'isolation_level': isolation_levels}}
+
+def json_constraint_sla_latency(e2e_latency_ms : float) -> Dict:
+    return {'sla_latency': {'e2e_latency_ms': e2e_latency_ms}}
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index da453d38fbdd9a3d256729e1fd4e19f1fd005b58..ff7ad3c1481d3c0f3cdf7a6b6004f62677948ecc 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -112,7 +112,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
             location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id)
         num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()}
         num_disjoint_paths = min(num_endpoints_per_location)
-        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active)
+        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active, 0.0)
 
     return target
 
diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py
index dd60441ca70329b9431188e28c21d98d941ada14..09723cc6f6b31e2496bf5ab475f50d0aa58f95c2 100644
--- a/src/context/service/database/ConfigRule.py
+++ b/src/context/service/database/ConfigRule.py
@@ -80,7 +80,7 @@ def compose_config_rules_data(
     return dict_config_rules
 
 def upsert_config_rules(
-    session : Session, config_rules : List[Dict],
+    session : Session, config_rules : List[Dict], is_delete : bool = False,
     device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None,
 ) -> bool:
     uuids_to_delete : Set[str] = set()
@@ -89,7 +89,9 @@ def upsert_config_rules(
     for config_rule in config_rules:
         configrule_uuid = config_rule['configrule_uuid']
         configrule_action = config_rule['action']
-        if configrule_action == ORM_ConfigActionEnum.SET:
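+        # when is_delete is set, all rules are scheduled for deletion regardless of their action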
+        if is_delete or configrule_action == ORM_ConfigActionEnum.DELETE:
+            uuids_to_delete.add(configrule_uuid)
+        elif configrule_action == ORM_ConfigActionEnum.SET:
             position = uuids_to_upsert.get(configrule_uuid)
             if position is None:
                 # if not added, add it
@@ -98,8 +100,6 @@ def upsert_config_rules(
             else:
                 # if already added, update occurrence
                 rules_to_upsert[position] = config_rule
-        elif configrule_action == ORM_ConfigActionEnum.DELETE:
-            uuids_to_delete.add(configrule_uuid)
         else:
             MSG = 'Action for ConfigRule({:s}) is not supported '+\
                   '(device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})'
diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py
index 0540841c3a570f9a1e28ec530998b115f73a62a7..3a73f6589f9332aa4c84f8f296f2cb56db3048bf 100644
--- a/src/context/service/database/Constraint.py
+++ b/src/context/service/database/Constraint.py
@@ -66,7 +66,7 @@ def compose_constraints_data(
             constraint_name = '{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid)
         elif kind in {
             ConstraintKindEnum.SCHEDULE, ConstraintKindEnum.SLA_CAPACITY, ConstraintKindEnum.SLA_LATENCY,
-            ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION_LEVEL
+            ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION
         }:
             constraint_name = '{:s}:{:s}:'.format(parent_kind, kind.value)
         else:
@@ -81,7 +81,7 @@ def compose_constraints_data(
     return dict_constraints
 
 def upsert_constraints(
-    session : Session, constraints : List[Dict],
+    session : Session, constraints : List[Dict], is_delete : bool = False,
     service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
 ) -> bool:
     uuids_to_upsert : Dict[str, int] = dict()
@@ -107,11 +107,11 @@ def upsert_constraints(
         #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True})
         #LOGGER.warning('delete stmt={:s}'.format(str(str_stmt)))
         constraint_deletes = session.execute(stmt)
-        LOGGER.warning('constraint_deletes.rowcount={:s}'.format(str(constraint_deletes.rowcount)))
+        #LOGGER.warning('constraint_deletes.rowcount={:s}'.format(str(constraint_deletes.rowcount)))
         delete_affected = int(constraint_deletes.rowcount) > 0
 
     upsert_affected = False
-    if len(constraints) > 0:
+    if not is_delete and len(constraints) > 0:
         stmt = insert(ConstraintModel).values(constraints)
         stmt = stmt.on_conflict_do_update(
             index_elements=[ConstraintModel.constraint_uuid],
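The same `is_delete` guard protects the INSERT .. ON CONFLICT branch: when deleting, no upsert is attempted at all. A hedged sketch of that pattern with SQLAlchemy's PostgreSQL dialect (table and columns are illustrative, not the TFS model):
```python
from typing import Dict, List
from sqlalchemy import Column, String
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class DemoConstraint(Base):
    __tablename__ = 'demo_constraint'
    constraint_uuid = Column(String, primary_key=True)
    data            = Column(String, nullable=False)

def upsert_demo_constraints(session : Session, constraints : List[Dict], is_delete : bool = False) -> bool:
    # Skip the whole upsert when the operation is a delete or there is nothing to write.
    if is_delete or len(constraints) == 0: return False
    stmt = insert(DemoConstraint).values(constraints)
    stmt = stmt.on_conflict_do_update(
        index_elements=[DemoConstraint.constraint_uuid],
        set_=dict(data=stmt.excluded.data),
    )
    return session.execute(stmt).rowcount > 0
```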
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
index a100103890f293d418b4c70a7948ad9687ffe5b3..e95cec4ae533795b23b8fd4e2f26ac9000c1bcce 100644
--- a/src/context/service/database/PolicyRule.py
+++ b/src/context/service/database/PolicyRule.py
@@ -65,7 +65,7 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
 
     policyrule_kind  = PolicyRuleKindEnum._member_map_.get(policyrule_kind.upper()) # pylint: disable=no-member
     policyrule_state = grpc_to_enum__policyrule_state(policyrule_basic.policyRuleState.policyRuleState)
-    policyrule_state_message = policyrule_basic.policyRuleState.policyRuleStateMessage
+    policyrule_state_msg = policyrule_basic.policyRuleState.policyRuleStateMessage
 
     json_policyrule_basic = grpc_message_to_json(policyrule_basic)
     policyrule_eca_data = json.dumps({
@@ -77,15 +77,15 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
     now = datetime.datetime.utcnow()
 
     policyrule_data = [{
-        'policyrule_uuid'         : policyrule_uuid,
-        'policyrule_kind'         : policyrule_kind,
-        'policyrule_state'        : policyrule_state,
-        'policyrule_state_message': policyrule_state_message,
-        'policyrule_priority'     : policyrule_basic.priority,
-        'policyrule_eca_data'     : policyrule_eca_data,
-        'created_at'              : now,
-        'updated_at'              : now,
-    }]
+        'policyrule_uuid'     : policyrule_uuid,
+        'policyrule_kind'     : policyrule_kind,
+        'policyrule_state'    : policyrule_state,
+        'policyrule_state_msg': policyrule_state_msg,
+        'policyrule_priority' : policyrule_basic.priority,
+        'policyrule_eca_data' : policyrule_eca_data,
+        'created_at'          : now,
+        'updated_at'          : now,
+    }]
 
     policyrule_service_uuid = None
     if policyrule_kind == PolicyRuleKindEnum.SERVICE:
@@ -108,11 +108,11 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
         stmt = stmt.on_conflict_do_update(
             index_elements=[PolicyRuleModel.policyrule_uuid],
             set_=dict(
-                policyrule_state         = stmt.excluded.policyrule_state,
-                policyrule_state_message = stmt.excluded.policyrule_state_message,
-                policyrule_priority      = stmt.excluded.policyrule_priority,
-                policyrule_eca_data      = stmt.excluded.policyrule_eca_data,
-                updated_at               = stmt.excluded.updated_at,
+                policyrule_state     = stmt.excluded.policyrule_state,
+                policyrule_state_msg = stmt.excluded.policyrule_state_msg,
+                policyrule_priority  = stmt.excluded.policyrule_priority,
+                policyrule_eca_data  = stmt.excluded.policyrule_eca_data,
+                updated_at           = stmt.excluded.updated_at,
             )
         )
         stmt = stmt.returning(PolicyRuleModel.created_at, PolicyRuleModel.updated_at)
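The RETURNING clause above exposes `created_at`/`updated_at`, which lets the caller tell a freshly inserted policy rule from an updated one: the first write stores identical timestamps and later upserts only advance `updated_at`. A pure-Python sketch of that convention (an assumption drawn from the column pair, not a verbatim excerpt):
```python
import datetime
from typing import Dict

def apply_upsert(row_store : Dict[str, Dict], uuid : str, data : Dict) -> bool:
    now = datetime.datetime.utcnow()
    row = row_store.get(uuid)
    if row is None:
        # first write: created_at == updated_at
        row_store[uuid] = {**data, 'created_at': now, 'updated_at': now}
    else:
        # later writes: only updated_at advances
        row.update(data)
        row['updated_at'] = now
    row = row_store[uuid]
    return row['created_at'] == row['updated_at']  # True => newly created

store : Dict[str, Dict] = {}
print(apply_upsert(store, 'pr-1', {'policyrule_state_msg': 'ok'}))  # True (created)
print(apply_upsert(store, 'pr-1', {'policyrule_state_msg': 'ok'}))  # False (updated)
```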
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
index 4b63a4ae56fa278679d145f1da2c62e767f73005..a81a80c3c2398fed16842bcc3d8aa16342edb72b 100644
--- a/src/context/service/database/Service.py
+++ b/src/context/service/database/Service.py
@@ -77,8 +77,11 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
     service_endpoints_data : List[Dict] = list()
     for i,endpoint_id in enumerate(request.service_endpoint_ids):
         endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
-        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+        if len(endpoint_context_uuid) == 0:
+            endpoint_context_uuid = context_get_uuid(request.service_id.context_id, allow_random=False)
+        else:
+            endpoint_context_uuid = context_get_uuid(endpoint_id.topology_id.context_id, allow_random=False)
+        if endpoint_context_uuid != context_uuid:
             raise InvalidArgumentException(
                 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
                 endpoint_context_uuid,
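The stricter validation above resolves both empty and explicit endpoint contexts through `context_get_uuid()` and requires an exact match with the service's context. A standalone sketch of the rule (helper and exception names are illustrative):
```python
from typing import List

class InvalidArgumentException(Exception): pass

def check_endpoint_contexts(service_context_uuid : str, endpoint_context_uuids : List[str]) -> None:
    for i, endpoint_context_uuid in enumerate(endpoint_context_uuids):
        if len(endpoint_context_uuid) == 0:
            # empty context: fall back to the service's own context
            endpoint_context_uuid = service_context_uuid
        if endpoint_context_uuid != service_context_uuid:
            raise InvalidArgumentException(
                'endpoint_ids[{:d}] belongs to context {:s}, expected {:s}'.format(
                    i, endpoint_context_uuid, service_context_uuid))

check_endpoint_contexts('ctx-1', ['', 'ctx-1'])   # passes
#check_endpoint_contexts('ctx-1', ['ctx-2'])      # would raise
```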
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
index 7c291e33d858841054adc59306fbedb2e9a18f79..1d6781d53f7c85d8cb878b1b38b0de65b4ef5726 100644
--- a/src/context/service/database/Slice.py
+++ b/src/context/service/database/Slice.py
@@ -77,8 +77,11 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
     slice_endpoints_data : List[Dict] = list()
     for i,endpoint_id in enumerate(request.slice_endpoint_ids):
         endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
-        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+        if len(endpoint_context_uuid) == 0:
+            endpoint_context_uuid = context_get_uuid(request.slice_id.context_id, allow_random=False)
+        else:
+            endpoint_context_uuid = context_get_uuid(endpoint_id.topology_id.context_id, allow_random=False)
+        if endpoint_context_uuid != context_uuid:
             raise InvalidArgumentException(
                 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
                 endpoint_context_uuid,
@@ -175,10 +178,6 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
     slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name
     context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=False)
 
-    if len(request.slice_constraints) > 0:         raise NotImplementedError('UnsetSlice: removal of constraints')
-    if len(request.slice_config.config_rules) > 0: raise NotImplementedError('UnsetSlice: removal of config rules')
-    if len(request.slice_endpoint_ids) > 0:        raise NotImplementedError('UnsetSlice: removal of endpoints')
-
     slice_endpoint_uuids : Set[str] = set()
     for i,endpoint_id in enumerate(request.slice_endpoint_ids):
         endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
@@ -200,6 +199,10 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
         for subslice_id in request.slice_subslice_ids
     }
 
+    now = datetime.datetime.utcnow()
+    constraints = compose_constraints_data(request.slice_constraints, now, slice_uuid=slice_uuid)
+    config_rules = compose_config_rules_data(request.slice_config.config_rules, now, slice_uuid=slice_uuid)
+
     def callback(session : Session) -> bool:
         num_deletes = 0
         if len(slice_service_uuids) > 0:
@@ -210,17 +213,21 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
                 )).delete()
         if len(slice_subslice_uuids) > 0:
             num_deletes += session.query(SliceSubSliceModel)\
-                .filter_by(and_(
+                .filter(and_(
                     SliceSubSliceModel.slice_uuid == slice_uuid,
                     SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids)
                 )).delete()
         if len(slice_endpoint_uuids) > 0:
             num_deletes += session.query(SliceEndPointModel)\
-                .filter_by(and_(
+                .filter(and_(
                     SliceEndPointModel.slice_uuid == slice_uuid,
                     SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids)
                 )).delete()
-        return num_deletes > 0
+
+        changed_constraints = upsert_constraints(session, constraints, is_delete=True, slice_uuid=slice_uuid)
+        changed_config_rules = upsert_config_rules(session, config_rules, is_delete=True, slice_uuid=slice_uuid)
+
+        return num_deletes > 0 or changed_constraints or changed_config_rules
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated
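Note the `.filter_by(and_(...))` to `.filter(and_(...))` fix in the callback above: `Query.filter_by()` only accepts keyword equality criteria, so feeding it an `and_()` expression is an error; boolean SQL expressions belong in `filter()`. A runnable sketch against an in-memory SQLite table (names illustrative):
```python
from sqlalchemy import Column, String, and_, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class DemoSubSlice(Base):
    __tablename__ = 'demo_slice_subslice'
    slice_uuid    = Column(String, primary_key=True)
    subslice_uuid = Column(String, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    num_deletes = session.query(DemoSubSlice).filter(and_(
        DemoSubSlice.slice_uuid == 'slice-1',
        DemoSubSlice.subslice_uuid.in_(['sub-1', 'sub-2']),
    )).delete(synchronize_session=False)
    session.commit()
    print(num_deletes)  # 0: the demo table is empty
```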
diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py
index 363611105135661ccf3bd001c2e65ab75f9b6a6c..d7bb97cd0fec1037e98c8713b885b2d5141cae63 100644
--- a/src/context/service/database/models/ConfigRuleModel.py
+++ b/src/context/service/database/models/ConfigRuleModel.py
@@ -28,9 +28,9 @@ class ConfigRuleModel(_Base):
     __tablename__ = 'configrule'
 
     configrule_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True, index=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConfigRuleKindEnum), nullable=False)
     action          = Column(Enum(ORM_ConfigActionEnum), nullable=False)
diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py
index c2b20de202cbeb065ffd50683d015729c76af9bc..156e33c6bb32e237af241035f1d9672b0b419222 100644
--- a/src/context/service/database/models/ConnectionModel.py
+++ b/src/context/service/database/models/ConnectionModel.py
@@ -25,7 +25,7 @@ class ConnectionModel(_Base):
     __tablename__ = 'connection'
 
     connection_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False, index=True)
     settings        = Column(String, nullable=False)
     created_at      = Column(DateTime, nullable=False)
     updated_at      = Column(DateTime, nullable=False)
@@ -56,7 +56,7 @@ class ConnectionEndPointModel(_Base):
     __tablename__ = 'connection_endpoint'
 
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid',     ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position        = Column(Integer, nullable=False)
 
     connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined')
@@ -70,7 +70,7 @@ class ConnectionSubServiceModel(_Base):
     __tablename__ = 'connection_subservice'
 
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
-    subservice_uuid = Column(ForeignKey('service.service_uuid',       ondelete='RESTRICT'), primary_key=True)
+    subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined')
     subservice = relationship('ServiceModel',    lazy='joined') # back_populates='connection_subservices'
diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py
index 01c7bcb76b00ac5d8b49d9f99f010d1ddfd30788..2412080c1a2883e7bed85e6e22f389270b3f73bc 100644
--- a/src/context/service/database/models/ConstraintModel.py
+++ b/src/context/service/database/models/ConstraintModel.py
@@ -19,22 +19,24 @@ from typing import Dict
 from ._Base import _Base
 
 # Enum values should match name of field in Constraint message
+# - enum item name should be Constraint message type in upper case
+# - enum item value should be Constraint message type as it is in the proto files
 class ConstraintKindEnum(enum.Enum):
-    CUSTOM              = 'custom'
-    SCHEDULE            = 'schedule'
-    ENDPOINT_LOCATION   = 'endpoint_location'
-    ENDPOINT_PRIORITY   = 'endpoint_priority'
-    SLA_CAPACITY        = 'sla_capacity'
-    SLA_LATENCY         = 'sla_latency'
-    SLA_AVAILABILITY    = 'sla_availability'
-    SLA_ISOLATION_LEVEL = 'sla_isolation'
+    CUSTOM            = 'custom'
+    SCHEDULE          = 'schedule'
+    ENDPOINT_LOCATION = 'endpoint_location'
+    ENDPOINT_PRIORITY = 'endpoint_priority'
+    SLA_CAPACITY      = 'sla_capacity'
+    SLA_LATENCY       = 'sla_latency'
+    SLA_AVAILABILITY  = 'sla_availability'
+    SLA_ISOLATION     = 'sla_isolation'
 
 class ConstraintModel(_Base):
     __tablename__ = 'constraint'
 
     constraint_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConstraintKindEnum), nullable=False)
     data            = Column(String, nullable=False)
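The renamed SLA_ISOLATION item restores the convention documented in the comment (item name = message type upper-cased, value = type as written in the proto files). A tiny self-check sketch of that invariant, using an illustrative standalone enum:
```python
import enum

class ConstraintKindEnum(enum.Enum):
    CUSTOM           = 'custom'
    SLA_AVAILABILITY = 'sla_availability'
    SLA_ISOLATION    = 'sla_isolation'

# the enum item name must be the value upper-cased
for item in ConstraintKindEnum:
    assert item.name == item.value.upper(), item
print('naming convention holds')
```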
diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py
index e591bc718711c6e0b8219eb60ce68c42f35a800c..12ba7e10e7c3d5789f9bf16ad7b4f50c35a36bf5 100644
--- a/src/context/service/database/models/EndPointModel.py
+++ b/src/context/service/database/models/EndPointModel.py
@@ -23,8 +23,8 @@ class EndPointModel(_Base):
     __tablename__ = 'endpoint'
 
     endpoint_uuid    = Column(UUID(as_uuid=False), primary_key=True)
-    device_uuid      = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), nullable=False)
-    topology_uuid    = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False)
+    device_uuid      = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), nullable=False, index=True)
+    topology_uuid    = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False, index=True)
     name             = Column(String, nullable=False)
     endpoint_type    = Column(String, nullable=False)
     kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1))
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
index 49c62d376624dc02b51a2b56860b04c322d66934..ee591f5c8404cd7f0f6c97651b5f731a51c43303 100644
--- a/src/context/service/database/models/LinkModel.py
+++ b/src/context/service/database/models/LinkModel.py
@@ -46,7 +46,7 @@ class LinkEndPointModel(_Base):
     __tablename__ = 'link_endpoint'
 
     link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     link     = relationship('LinkModel',     back_populates='link_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints'
diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py
index 4059991e1f1af7851d9fced17739b92675261227..2f0c8a326a57a05ab1fd623a968dea0bc39d9e76 100644
--- a/src/context/service/database/models/PolicyRuleModel.py
+++ b/src/context/service/database/models/PolicyRuleModel.py
@@ -28,15 +28,15 @@ class PolicyRuleKindEnum(enum.Enum):
 class PolicyRuleModel(_Base):
     __tablename__ = 'policyrule'
 
-    policyrule_uuid          = Column(UUID(as_uuid=False), primary_key=True)
-    policyrule_kind          = Column(Enum(PolicyRuleKindEnum), nullable=False)
-    policyrule_state         = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False)
-    policyrule_state_message = Column(String, nullable=False)
-    policyrule_priority      = Column(Integer, nullable=False)
-    policyrule_service_uuid  = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True)
-    policyrule_eca_data      = Column(String, nullable=False)
-    created_at               = Column(DateTime, nullable=False)
-    updated_at               = Column(DateTime, nullable=False)
+    policyrule_uuid         = Column(UUID(as_uuid=False), primary_key=True)
+    policyrule_kind         = Column(Enum(PolicyRuleKindEnum), nullable=False)
+    policyrule_state        = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False)
+    policyrule_state_msg    = Column(String, nullable=False)
+    policyrule_priority     = Column(Integer, nullable=False)
+    policyrule_service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True, index=True)
+    policyrule_eca_data     = Column(String, nullable=False)
+    created_at              = Column(DateTime, nullable=False)
+    updated_at              = Column(DateTime, nullable=False)
 
     policyrule_service = relationship('ServiceModel') # back_populates='policyrules'
     policyrule_devices = relationship('PolicyRuleDeviceModel' ) # back_populates='policyrule'
@@ -55,7 +55,7 @@ class PolicyRuleModel(_Base):
             'policyRuleId': self.dump_id(),
             'policyRuleState': {
                 'policyRuleState': self.policyrule_state.value,
-                'policyRuleStateMessage': self.policyrule_state_message,
+                'policyRuleStateMessage': self.policyrule_state_msg,
             },
             'priority': self.policyrule_priority,
         })
@@ -71,7 +71,7 @@ class PolicyRuleDeviceModel(_Base):
     __tablename__ = 'policyrule_device'
 
     policyrule_uuid = Column(ForeignKey('policyrule.policyrule_uuid', ondelete='RESTRICT'), primary_key=True)
-    device_uuid     = Column(ForeignKey('device.device_uuid',         ondelete='RESTRICT'), primary_key=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices'
     device     = relationship('DeviceModel',     lazy='joined') # back_populates='policyrule_devices'
diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py
index b581bf900a8861d9af199fef62bd218159b1e00e..09ff381b5eb374ea752590bba5403fe816319036 100644
--- a/src/context/service/database/models/ServiceModel.py
+++ b/src/context/service/database/models/ServiceModel.py
@@ -25,7 +25,7 @@ class ServiceModel(_Base):
     __tablename__ = 'service'
 
     service_uuid   = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid   = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid   = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     service_name   = Column(String, nullable=False)
     service_type   = Column(Enum(ORM_ServiceTypeEnum), nullable=False)
     service_status = Column(Enum(ORM_ServiceStatusEnum), nullable=False)
@@ -67,7 +67,7 @@ class ServiceEndPointModel(_Base):
     __tablename__ = 'service_endpoint'
 
     service_uuid  = Column(ForeignKey('service.service_uuid',   ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     service  = relationship('ServiceModel',  back_populates='service_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints'
diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py
index 1a562bcd973cc41d777fdd20e4d42622afeebc44..2d6c884169154fee8d44c26464416c6708c650b1 100644
--- a/src/context/service/database/models/SliceModel.py
+++ b/src/context/service/database/models/SliceModel.py
@@ -24,7 +24,7 @@ class SliceModel(_Base):
     __tablename__ = 'slice'
 
     slice_uuid         = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid       = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid       = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     slice_name         = Column(String, nullable=True)
     slice_status       = Column(Enum(ORM_SliceStatusEnum), nullable=False)
     slice_owner_uuid   = Column(String, nullable=True)
@@ -81,7 +81,7 @@ class SliceEndPointModel(_Base):
     __tablename__ = 'slice_endpoint'
 
     slice_uuid    = Column(ForeignKey('slice.slice_uuid',       ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     slice    = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints'
@@ -90,7 +90,7 @@ class SliceServiceModel(_Base):
     __tablename__ = 'slice_service'
 
     slice_uuid   = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE' ), primary_key=True)
-    service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True)
+    service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     slice   = relationship('SliceModel', back_populates='slice_services', lazy='joined')
     service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services'
@@ -98,5 +98,9 @@ class SliceServiceModel(_Base):
 class SliceSubSliceModel(_Base):
     __tablename__ = 'slice_subslice'
 
-    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
-    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True)
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
+    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
+
+    slice    = relationship(
+        'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
index 92802e5b2ddb4ed57342bbd244255b73b11c6cce..7dc2333f0a9b979f251c173d850a235dcb822d91 100644
--- a/src/context/service/database/models/TopologyModel.py
+++ b/src/context/service/database/models/TopologyModel.py
@@ -22,7 +22,7 @@ class TopologyModel(_Base):
     __tablename__ = 'topology'
 
     topology_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid  = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid  = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     topology_name = Column(String, nullable=False)
     created_at    = Column(DateTime, nullable=False)
     updated_at    = Column(DateTime, nullable=False)
@@ -56,8 +56,8 @@ class TopologyModel(_Base):
 class TopologyDeviceModel(_Base):
     __tablename__ = 'topology_device'
 
-    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
-    device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True)
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True, index=True)
 
     #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices'
     device   = relationship('DeviceModel',   lazy='joined') # back_populates='topology_devices'
@@ -65,8 +65,8 @@ class TopologyDeviceModel(_Base):
 class TopologyLinkModel(_Base):
     __tablename__ = 'topology_link'
 
-    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
-    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True, index=True)
 
     #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links'
     link     = relationship('LinkModel',     lazy='joined') # back_populates='topology_links'
diff --git a/src/context/service/database/models/_Base.py b/src/context/service/database/models/_Base.py
index 4323fb7130462b13958627216c62f1fe4edc91c7..a10de60eb8731132ec815de1ff897c06ac12b665 100644
--- a/src/context/service/database/models/_Base.py
+++ b/src/context/service/database/models/_Base.py
@@ -13,10 +13,60 @@
 # limitations under the License.
 
 import sqlalchemy
-from sqlalchemy.orm import declarative_base
+from typing import Any, List
+from sqlalchemy.orm import Session, sessionmaker, declarative_base
+from sqlalchemy.sql import text
+from sqlalchemy_cockroachdb import run_transaction
 
 _Base = declarative_base()
 
+def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
+    def index_storing(
+        index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str]
+    ) -> Any:
+        str_index_fields = ','.join(['"{:s}"'.format(index_field) for index_field in index_fields])
+        str_storing_fields = ','.join(['"{:s}"'.format(storing_field) for storing_field in storing_fields])
+        INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'
+        return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields))
+
+    statements = [
+        index_storing('configrule_device_uuid_rec_idx', 'configrule', ['device_uuid'], [
+            'service_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('configrule_service_uuid_rec_idx', 'configrule', ['service_uuid'], [
+            'device_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('configrule_slice_uuid_rec_idx', 'configrule', ['slice_uuid'], [
+            'device_uuid', 'service_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('connection_service_uuid_rec_idx', 'connection', ['service_uuid'], [
+            'settings', 'created_at', 'updated_at'
+        ]),
+        index_storing('constraint_service_uuid_rec_idx', 'constraint', ['service_uuid'], [
+            'slice_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('constraint_slice_uuid_rec_idx', 'constraint', ['slice_uuid'], [
+            'service_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('endpoint_device_uuid_rec_idx', 'endpoint', ['device_uuid'], [
+            'topology_uuid', 'name', 'endpoint_type', 'kpi_sample_types', 'created_at', 'updated_at'
+        ]),
+        index_storing('service_context_uuid_rec_idx', 'service', ['context_uuid'], [
+            'service_name', 'service_type', 'service_status', 'created_at', 'updated_at'
+        ]),
+        index_storing('slice_context_uuid_rec_idx', 'slice', ['context_uuid'], [
+            'slice_name', 'slice_status', 'slice_owner_uuid', 'slice_owner_string', 'created_at', 'updated_at'
+        ]),
+
+        index_storing('topology_context_uuid_rec_idx', 'topology', ['context_uuid'], [
+            'topology_name', 'created_at', 'updated_at'
+        ]),
+    ]
+    def callback(session : Session) -> None:
+        for stmt in statements: session.execute(stmt)
+    run_transaction(sessionmaker(bind=db_engine), callback)
+
 def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False):
     if drop_if_exists: _Base.metadata.drop_all(db_engine)
     _Base.metadata.create_all(db_engine)
+    create_performance_enhancers(db_engine)
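For reference, a standalone sketch of the SQL emitted by `index_storing()` above. CockroachDB's STORING clause attaches covering columns to the secondary index so lookups by the foreign key can be answered from the index alone (PostgreSQL spells the same idea INCLUDE):
```python
from typing import List

def index_storing_sql(
    index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str]
) -> str:
    # mirrors the format string used by index_storing() in the hunk above
    str_index_fields   = ','.join(['"{:s}"'.format(f) for f in index_fields])
    str_storing_fields = ','.join(['"{:s}"'.format(f) for f in storing_fields])
    return 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'.format(
        index_name, table_name, str_index_fields, str_storing_fields)

print(index_storing_sql(
    'connection_service_uuid_rec_idx', 'connection', ['service_uuid'],
    ['settings', 'created_at', 'updated_at']))
# CREATE INDEX IF NOT EXISTS connection_service_uuid_rec_idx
#   ON "connection" ("service_uuid") STORING ("settings","created_at","updated_at");
```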
diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py
index 8634c1f309e0c060654a168a1ad400f1d4722a32..6b52ef4c0f3583de628706ba79efffb9d5709820 100644
--- a/src/context/tests/Objects.py
+++ b/src/context/tests/Objects.py
@@ -17,7 +17,7 @@ from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.tools.object_factory.ConfigRule import json_config_rule_set
 from common.tools.object_factory.Connection import json_connection, json_connection_id
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_custom, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import json_device_id, json_device_packetrouter_disabled
 from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
@@ -95,7 +95,7 @@ def compose_service(
         for device_id, endpoint_name in endpoint_ids
     ]
     constraints = [
-        json_constraint_custom('latency[ms]', str(latency_ms)),
+        json_constraint_sla_latency(latency_ms),
         json_constraint_custom('jitter[us]',  str(jitter_us)),
     ]
     config_rules = [
@@ -128,7 +128,7 @@ def compose_slice(
         for device_id, endpoint_name in endpoint_ids
     ]
     constraints = [
-        json_constraint_custom('latency[ms]', str(latency_ms)),
+        json_constraint_sla_latency(latency_ms),
         json_constraint_custom('jitter[us]',  str(jitter_us)),
     ]
     config_rules = [
diff --git a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
index ea5cf2cb77e34fc4f9e88490ff400b92b1f64e66..5bbbf89e84e764677638b7e4e3f4934336321576 100644
--- a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
+++ b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
@@ -51,7 +51,7 @@ class SyntheticSamplingParameters:
             metric = match.group(2)
             metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '')
 
-            LOGGER.info(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense))
+            LOGGER.debug(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense))
 
             parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense)
             parameters = self.__data.get(parameters_key)
diff --git a/src/device/service/drivers/xr/README_XR.md b/src/device/service/drivers/xr/README_XR.md
index c741c3e808ebddd20c9c4749064964594ea32b73..fa1bc944035d27769cd9c16e0c29318e554e9489 100644
--- a/src/device/service/drivers/xr/README_XR.md
+++ b/src/device/service/drivers/xr/README_XR.md
@@ -146,6 +146,18 @@ arbitrary endpoints in the topology (with consequent underlying XR service insta
     PYTHONPATH=../../../../ ./service-cli.py list
     PYTHONPATH=../../../../ ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f
 ```
+
+It is also possible to create XR services directly, without a multi-layer service on top. E.g.:
+```
+    PYTHONPATH=../../../../  ./service-cli.py create-xr FooService X1-XR-CONSTELLATION  "XR HUB 1|XR-T1" "XR LEAF 2|XR-T1"
+```
+
+Additionally, it is possible to list the available endpoints and to delete services:
+```
+    PYTHONPATH=../../../../  ./service-cli.py list-endpoints
+    PYTHONPATH=../../../../  ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f
+```
+
 The PYTHONPATH is mandatory. A suitable topology JSON must have been loaded beforehand. With the
 CockroachDB persistence, it is sufficient to load the topology once and it will persist.
 
diff --git a/src/device/service/drivers/xr/service-cli.py b/src/device/service/drivers/xr/service-cli.py
index 01bd2aaa118225cf74a953fff81b54abb857e39b..7ab9606cef7bd7d3cca4f414cbd704ab150c8f52 100755
--- a/src/device/service/drivers/xr/service-cli.py
+++ b/src/device/service/drivers/xr/service-cli.py
@@ -19,21 +19,33 @@
 #
 # Run in this directory with PYTHONPATH=../../../../
 # E.g.:
+#   Create multi-layer service (L2 VPN over XR):
 #     PYTHONPATH=../../../../  ./service-cli.py create 1 R1-EMU 13/1/2 500 2 R3-EMU 13/1/2 500
+#   Single-layer (XR without services on top of it):
+#     PYTHONPATH=../../../../  ./service-cli.py create-xr FooService X1-XR-CONSTELLATION  "XR HUB 1|XR-T1" "XR LEAF 2|XR-T1"
+#   List services:
 #     PYTHONPATH=../../../../  ./service-cli.py list
+#   List possible endpoints:
+#     PYTHONPATH=../../../../  ./service-cli.py list-endpoints
+#   Delete service (if multi-layer, always delete the highest layer!):
 #     PYTHONPATH=../../../../  ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f
 
-
 import argparse
 import logging
-import traceback
+from copy import deepcopy
+from dataclasses import dataclass, field
+from typing import Dict
 from contextlib import contextmanager
 
 from common.Settings import get_setting
 from context.client.ContextClient import ContextClient
+from service.client.ServiceClient import ServiceClient
 from tests.tools.mock_osm.MockOSM import MockOSM
-from common.proto.context_pb2 import ContextId, ServiceTypeEnum, ServiceStatusEnum
+from common.proto.context_pb2 import ContextId, ServiceTypeEnum, ServiceStatusEnum, Service, Empty, ServiceId
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Topology import json_topology_id
+from common.tools.object_factory.ConfigRule import json_config_rule_set
 
 LOGGER = logging.getLogger(__name__)
 
@@ -48,11 +60,52 @@ def make_context_client():
     finally:
         _client.close()
 
+@contextmanager
+def make_service_client():
+    try:
+        _client = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC'))
+        yield _client
+    finally:
+        _client.close()
+
 def make_osm_wim():
     wim_url = 'http://{:s}:{:s}'.format(
         get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
     return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
 
+@dataclass
+class DevInfo:
+    name: str
+    uuid: str
+    endpoints: Dict[str, str] = field(default_factory=dict)
+    endpoints_by_uuid: Dict[str, str] = field(default_factory=dict)
+
+    def get_endpoint_uuid_or_exit(self, ep_name: str) -> str:
+        if ep_name not in self.endpoints:
+            print(f"Endpoint {ep_name} does not exist in device {self.name}. See \"service-cli.py list-endpoints\"")
+            exit(-1)
+        return self.endpoints[ep_name]
+
+def get_devices(cc: ContextClient) -> Dict[str, DevInfo]:
+    r = cc.ListDevices(Empty())
+    # print(grpc_message_to_json_string(r))
+
+    devices = dict()
+    for dev in r.devices:
+        di = DevInfo(dev.name, dev.device_id.device_uuid.uuid)
+        for ep in dev.device_endpoints:
+            di.endpoints[ep.name] = ep.endpoint_id.endpoint_uuid.uuid
+            di.endpoints_by_uuid[ep.endpoint_id.endpoint_uuid.uuid] = ep.name
+        devices[dev.name] = di
+    return devices
+
+def get_endpoint_map(devices: Dict[str, DevInfo]):
+    ep_map = dict()
+    for dev in devices.values():
+        for ep_name, ep_uuid in dev.endpoints.items():
+            ep_map[ep_uuid] = (dev.name, ep_name)
+    return ep_map
+
 logging.basicConfig(level=logging.ERROR)
 
 parser = argparse.ArgumentParser(description='TF Service Management Utility')
@@ -74,6 +127,13 @@ delete_parser = subparsers.add_parser('delete')
 delete_parser.add_argument('service_uuid', type=str, help='UUID of the service to be deleted')
 
 list_parser = subparsers.add_parser('list')
+list_endpoints_parser = subparsers.add_parser('list-endpoints')
+
+create_xr_parser = subparsers.add_parser('create-xr')
+create_xr_parser.add_argument('service_name', type=str, help='Service Name')
+create_xr_parser.add_argument('constellation', type=str, help='XR Constellation')
+create_xr_parser.add_argument('interface1', type=str, help='One endpoint of the service')
+create_xr_parser.add_argument('interface2', type=str, help='Second endpoint of the service')
 
 args = parser.parse_args()
 
@@ -103,12 +163,17 @@ else:
     WIM_SERVICE_CONNECTION_POINTS = []
 
 #print(str(args))
-print(f"=== WIM_SERVICE_TYPE: {WIM_SERVICE_TYPE}")
-print(f"=== WIM_SERVICE_CONNECTION_POINTS: {WIM_SERVICE_CONNECTION_POINTS}")
-print(f"=== WIM_MAPPING: {WIM_MAPPING}")
+#print(f"=== WIM_SERVICE_TYPE: {WIM_SERVICE_TYPE}")
+#print(f"=== WIM_SERVICE_CONNECTION_POINTS: {WIM_SERVICE_CONNECTION_POINTS}")
+#print(f"=== WIM_MAPPING: {WIM_MAPPING}")
 
 with make_context_client() as client:
-    osm_wim = make_osm_wim();
+    # We only permit one context in our demos/testing
+    response = client.ListContextIds(Empty())
+    assert len(response.context_ids) == 1
+    context_uuid = json_context_id(response.context_ids[0].context_uuid.uuid)
+
+    osm_wim = make_osm_wim()
 
     if args.command == "create":
         service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
@@ -117,28 +182,122 @@ with make_context_client() as client:
         print(f"*** Get created service status --> {str(status)}")
 
     elif args.command == "delete":
-        osm_wim.wim.check_credentials()
+        service_id = {
+            "context_id": context_uuid,
+            "service_uuid": {
+                "uuid": args.service_uuid
+            }
+        }
+
         try:
-            osm_wim.wim.delete_connectivity_service(args.service_uuid)
-            print(f"*** Service {args.service_uuid} is no longer present (delete was successfull or service did not exist)")
-        except Exception as e:
-            print(f"*** Failed to delete service {args.service_uuid}, {e}")
+            response = client.GetService(ServiceId(**service_id))
+            #print(grpc_message_to_json_string(response))
+
+            high_level_delete = response.service_type in {ServiceTypeEnum.SERVICETYPE_L2NM, ServiceTypeEnum.SERVICETYPE_L3NM}
+            print(f"Deleting service {response.name}, type {ServiceTypeEnum.Name(response.service_type)}, {high_level_delete=}")
+
+        except Exception:
+            print(f"No service with uuid {args.service_uuid} ({service_id})")
+            exit(-1)
+
+        if high_level_delete:
+            osm_wim.wim.check_credentials()
+            try:
+                osm_wim.wim.delete_connectivity_service(args.service_uuid)
+                print(f"*** Service {args.service_uuid} deleted (L2SM/L3SM layer)")
+            except Exception as e:
+                print(f"*** Failed to delete service {args.service_uuid}, {e}")
+        else:
+            with make_service_client() as service_client:
+                try:
+                    service_client.DeleteService(ServiceId(**service_id))
+                    print(f"*** Service {args.service_uuid} deleted (low level)")
+                except Exception as e:
+                    print(f"*** Failed to delete service {args.service_uuid}, {e}")
+
+    elif args.command == "create-xr":
+        CONTEXT_NAME = 'admin'
+        CONTEXT_ID   = json_context_id(CONTEXT_NAME)
+        CONTEXT      = json_context(CONTEXT_NAME, name=CONTEXT_NAME)
+
+        json_tapi_settings = {
+            'capacity_value'  : 50.0,
+            'capacity_unit'   : 'GHz',
+            'layer_proto_name': 'PHOTONIC_MEDIA',
+            'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
+            'direction'       : 'UNIDIRECTIONAL',
+        }
+        config_rule = json_config_rule_set('/settings', json_tapi_settings)
+
+        devices = get_devices(client)
+        if args.constellation not in devices:
+            print(f"Constellation {args.constellation} does not exist as a device. See \"service-cli.py list-endpoints\"")
+            exit(-1)
+        else:
+            dev_info = devices[args.constellation]
+            constellation_uuid = dev_info.uuid
+
+        interface1_uuid = dev_info.get_endpoint_uuid_or_exit(args.interface1)
+        interface2_uuid = dev_info.get_endpoint_uuid_or_exit(args.interface2)
+
+        print(f"Constellation {args.constellation:40}: {constellation_uuid:36}")
+        print(f"Interface 1   {args.interface1:40}: {interface1_uuid:36}")
+        print(f"Interface 2   {args.interface2:40}: {interface2_uuid:36}")
+
+        service_request = {
+            "name": args.service_name,
+            "service_id": {
+                 "context_id": {"context_uuid": {"uuid": response.context_ids[0].context_uuid.uuid}},
+                 "service_uuid": {"uuid": args.service_name}
+            },
+            'service_type'        : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
+            "service_endpoint_ids": [
+                {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': interface1_uuid}, 'topology_id': json_topology_id("admin", context_id=context_uuid)},
+                {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': interface2_uuid}, 'topology_id': json_topology_id("admin", context_id=context_uuid)}
+            ],
+            'service_status'      : {'service_status': ServiceStatusEnum.SERVICESTATUS_PLANNED},
+            'service_constraints' : [],
+        }
+
+        with make_service_client() as service_client:
+            sr = deepcopy(service_request)
+            endpoints, sr['service_endpoint_ids'] = sr['service_endpoint_ids'], []
+            create_response = service_client.CreateService(Service(**sr))
+            print(f'CreateService: {grpc_message_to_json_string(create_response)}')
+
+            sr['service_endpoint_ids'] = endpoints
+            #sr['service_id']['service_uuid'] = create_response
+            sr['service_config'] = {'config_rules': [config_rule]}
+
+            update_response = service_client.UpdateService(Service(**sr))
+            print(f'UpdateService: {grpc_message_to_json_string(update_response)}')
+
     elif args.command == "list":
+        devices = get_devices(client)
+        ep_map = get_endpoint_map(devices)
+
         response = client.ListServices(ContextId(**CONTEXT_ID))
 
-        #print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+        # print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
         for service in response.services:
             scs = ""
-            
-            # See if there are endpoint constraints that might be regognizable by the user.
-            # Keys do not necessarily exist, so catch exceptions and ignore those constraints
-            # that we cannot easily represent.
-            for sc in service.service_constraints:
-                try:
-                    scs += f"{sc.endpoint_location.endpoint_id.device_id.device_uuid.uuid}:{sc.endpoint_location.endpoint_id.endpoint_uuid.uuid} "
-                except Exception:
-                    pass
 
-            print(f"{service.service_id.service_uuid.uuid:36}  {ServiceTypeEnum.Name(service.service_type):40}  {ServiceStatusEnum.Name(service.service_status.service_status)}  {scs}")
+            ep_list = []
+            for ep in service.service_endpoint_ids:
+                ep_uuid = ep.endpoint_uuid.uuid
+                if ep_uuid in ep_map:
+                    dev_name, ep_name = ep_map[ep_uuid]
+                    ep_list.append(f"{dev_name}:{ep_name}")
+            ep_list.sort()
+            eps = ", ".join(ep_list)
 
+            #print(f"{service.service_id.service_uuid.uuid:36}  {ServiceTypeEnum.Name(service.service_type):40}  {service.name:40}  {ServiceStatusEnum.Name(service.service_status.service_status)}  {scs}")
+            print(f"{service.service_id.service_uuid.uuid:36}  {ServiceTypeEnum.Name(service.service_type):40}  {service.name:40}  {ServiceStatusEnum.Name(service.service_status.service_status):28}  {eps}")
 
+    elif args.command == "list-endpoints":
+        devices = get_devices(client)
+        for name in sorted(devices.keys()):
+            dev = devices[name]
+            print(f"{name:40}    {dev.uuid:36}")
+            for ep_name in sorted(dev.endpoints.keys()):
+                print(f"    {ep_name:40}    {dev.endpoints[ep_name]:36}")
diff --git a/src/device/service/drivers/xr/setup_test_env.sh b/src/device/service/drivers/xr/setup_test_env.sh
index 92ff4a0312fb8f963f934f4cfd8d18603675aed0..bd5463cd4f9d08c903fc601cfcb7241b672e7681 100755
--- a/src/device/service/drivers/xr/setup_test_env.sh
+++ b/src/device/service/drivers/xr/setup_test_env.sh
@@ -17,7 +17,11 @@ export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get service/contextservice --namesp
 export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service/contextservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}')
 export COMPUTESERVICE_SERVICE_HOST=$(kubectl get service/computeservice --namespace tfs  --template '{{.spec.clusterIP}}')
 export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service/computeservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
+export SERVICESERVICE_SERVICE_HOST=$(kubectl get service/serviceservice --namespace tfs  --template '{{.spec.clusterIP}}')
+export SERVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service/serviceservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}')
 echo "CONTEXTSERVICE_SERVICE_HOST=$CONTEXTSERVICE_SERVICE_HOST"
 echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=$CONTEXTSERVICE_SERVICE_PORT_GRPC"
 echo "COMPUTESERVICE_SERVICE_HOST=$COMPUTESERVICE_SERVICE_HOST"
 echo "COMPUTESERVICE_SERVICE_PORT_HTTP=$COMPUTESERVICE_SERVICE_PORT_HTTP"
+echo "SERVICESERVICE_SERVICE_HOST=$SERVICESERVICE_SERVICE_HOST"
+echo "SERVICESERVICE_SERVICE_PORT_GRPC=$SERVICESERVICE_SERVICE_PORT_GRPC"
diff --git a/src/load_generator/client/LoadGeneratorClient.py b/src/load_generator/client/LoadGeneratorClient.py
index 99626bbbb59671af41c11054d34338194f42a6af..2bed40dfdfe13d2920166bcb56237fe84bff8789 100644
--- a/src/load_generator/client/LoadGeneratorClient.py
+++ b/src/load_generator/client/LoadGeneratorClient.py
@@ -16,6 +16,7 @@ import grpc, logging
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_host, get_service_port_grpc
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, Status
 from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -46,12 +47,19 @@ class LoadGeneratorClient:
         self.stub = None
 
     @RETRY_DECORATOR
-    def Start(self, request : Empty) -> Empty:
+    def Start(self, request : Parameters) -> Empty:
         LOGGER.debug('Start request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.Start(request)
         LOGGER.debug('Start result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def GetStatus(self, request : Empty) -> Status:
+        LOGGER.debug('GetStatus request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetStatus(request)
+        LOGGER.debug('GetStatus result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def Stop(self, request : Empty) -> Empty:
         LOGGER.debug('Stop request: {:s}'.format(grpc_message_to_json_string(request)))
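A hedged usage sketch for the extended client: `Start()` now takes `Parameters` and `GetStatus()` reports progress. Connection handling is assumed to follow the other TFS gRPC clients (host/port resolved from settings when omitted); treat the exact constructor and lifecycle calls as assumptions:
```python
from common.proto.context_pb2 import Empty
from common.proto.load_generator_pb2 import Parameters
from load_generator.client.LoadGeneratorClient import LoadGeneratorClient

client = LoadGeneratorClient()      # host/port resolved from settings (assumed)
client.Start(Parameters())          # start generation with default parameters
status = client.GetStatus(Empty())  # poll how many requests were generated so far
client.Stop(Empty())
client.close()                      # close() assumed, mirroring other TFS clients
```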
diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
index b71dd9a35329e2aef6ce64739f59103a656b4de3..9ae3cdc1216891ca4dfcf01c1bd49d27bf4ef6f6 100644
--- a/src/load_generator/load_gen/Constants.py
+++ b/src/load_generator/load_gen/Constants.py
@@ -26,3 +26,5 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT': 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT',
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT'  : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
+
+MAX_WORKER_THREADS = 10
\ No newline at end of file
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index 906c26e98a75fe3c8f15d628f863faac4ba2ea16..e94dc0cb948d703f71925fd932e749ebb544650e 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -14,9 +14,11 @@
 
 import logging, json, random, threading
 from typing import Dict, Optional, Set, Tuple
-from common.proto.context_pb2 import Empty, TopologyId
+from common.proto.context_pb2 import Empty, IsolationLevelEnum, TopologyId
 from common.tools.grpc.Tools import grpc_message_to_json
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import (
+    json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_isolation,
+    json_constraint_sla_latency)
 from common.tools.object_factory.ConfigRule import json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
@@ -36,7 +38,7 @@ class RequestGenerator:
     def __init__(self, parameters : Parameters) -> None:
         self._parameters = parameters
         self._lock = threading.Lock()
-        self._num_requests = 0
+        self._num_generated = 0
         self._available_device_endpoints : Dict[str, Set[str]] = dict()
         self._used_device_endpoints : Dict[str, Dict[str, str]] = dict()
         self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict()
@@ -45,6 +47,12 @@ class RequestGenerator:
         self._device_data : Dict[str, Dict] = dict()
         self._device_endpoint_data : Dict[str, Dict[str, Dict]] = dict()
 
+    @property
+    def num_generated(self): return self._num_generated
+
+    @property
+    def infinite_loop(self): return self._parameters.num_requests == 0
+
     def initialize(self) -> None:
         with self._lock:
             self._available_device_endpoints.clear()
@@ -96,17 +104,14 @@ class RequestGenerator:
                     if self._parameters.record_to_dlt:
                         record_link_to_dlt(dlt_connector_client, dlt_domain_id, link.link_id)
 
-    @property
-    def num_requests_generated(self): return self._num_requests
-
     def dump_state(self) -> None:
         with self._lock:
             _endpoints = {
                 device_uuid:[endpoint_uuid for endpoint_uuid in endpoint_uuids]
                 for device_uuid,endpoint_uuids in self._available_device_endpoints.items()
             }
-            LOGGER.info('[dump_state] available_device_endpoints = {:s}'.format(json.dumps(_endpoints)))
-            LOGGER.info('[dump_state] used_device_endpoints = {:s}'.format(json.dumps(self._used_device_endpoints)))
+            LOGGER.debug('[dump_state] available_device_endpoints = {:s}'.format(json.dumps(_endpoints)))
+            LOGGER.debug('[dump_state] used_device_endpoints = {:s}'.format(json.dumps(self._used_device_endpoints)))
 
     def _use_device_endpoint(
         self, service_uuid : str, request_type : RequestType, endpoint_types : Optional[Set[str]] = None,
@@ -167,10 +172,13 @@ class RequestGenerator:
             self._used_device_endpoints.setdefault(device_uuid, dict()).pop(endpoint_uuid, None)
             self._available_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid)
 
-    def compose_request(self) -> Optional[Dict]:
+    def compose_request(self) -> Tuple[bool, Optional[Dict]]: # completed, request
         with self._lock:
-            self._num_requests += 1
-            num_request = self._num_requests
+            if not self.infinite_loop and (self._num_generated >= self._parameters.num_requests):
+                LOGGER.info('Generation Done!')
+                return True, None # completed
+            self._num_generated += 1
+            num_request = self._num_generated
 
         #request_uuid = str(uuid.uuid4())
         request_uuid = 'svc_{:d}'.format(num_request)
@@ -181,9 +189,9 @@ class RequestGenerator:
         if request_type in {
             RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, RequestType.SERVICE_TAPI, RequestType.SERVICE_MW
         }:
-            return self._compose_service(num_request, request_uuid, request_type)
+            return False, self._compose_service(num_request, request_uuid, request_type)
         elif request_type in {RequestType.SLICE_L2NM, RequestType.SLICE_L3NM}:
-            return self._compose_slice(num_request, request_uuid, request_type)
+            return False, self._compose_slice(num_request, request_uuid, request_type)
 
     def _compose_service(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
         # choose source endpoint
@@ -222,10 +230,17 @@ class RequestGenerator:
         ]
 
         if request_type == RequestType.SERVICE_L2NM:
+            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+
             constraints = [
-                json_constraint_custom('bandwidth[gbps]', '10.0'),
-                json_constraint_custom('latency[ms]',     '20.0'),
+                json_constraint_sla_availability(1, True, availability),
+                json_constraint_sla_capacity(capacity_gbps),
+                json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]),
+                json_constraint_sla_latency(e2e_latency_ms),
             ]
+
             vlan_id = num_request % 1000
             circuit_id = '{:03d}'.format(vlan_id)
 
@@ -260,10 +275,17 @@ class RequestGenerator:
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
         elif request_type == RequestType.SERVICE_L3NM:
+            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+
             constraints = [
-                json_constraint_custom('bandwidth[gbps]', '10.0'),
-                json_constraint_custom('latency[ms]',     '20.0'),
+                json_constraint_sla_availability(1, True, availability),
+                json_constraint_sla_capacity(capacity_gbps),
+                json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]),
+                json_constraint_sla_latency(e2e_latency_ms),
             ]
+
             vlan_id = num_request % 1000
             bgp_as = 60000 + (num_request % 10000)
             bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
@@ -357,9 +379,15 @@ class RequestGenerator:
             json_endpoint_id(json_device_id(src_device_uuid), src_endpoint_uuid),
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
+
+        availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+        capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+        e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
         constraints = [
-            json_constraint_custom('bandwidth[gbps]', '10.0'),
-            json_constraint_custom('latency[ms]',     '20.0'),
+            json_constraint_sla_availability(1, True, availability),
+            json_constraint_sla_capacity(capacity_gbps),
+            json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]),
+            json_constraint_sla_latency(e2e_latency_ms),
         ]
 
         if request_type == RequestType.SLICE_L2NM:
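
The four json_constraint_sla_* helpers replace the old string-keyed custom constraints. A minimal sketch of what they presumably emit: the sla_capacity/sla_latency shapes are taken from the example JSON later in this patch, while the sla_availability and sla_isolation field names are assumptions inferred from the proto accesses shown in other hunks (num_disjoint_paths, isolation_level):

    # Hedged sketch of the SLA constraint factories used above.
    from typing import Dict, List

    def json_constraint_sla_capacity(capacity_gbps : float) -> Dict:
        return {'sla_capacity': {'capacity_gbps': capacity_gbps}}

    def json_constraint_sla_latency(e2e_latency_ms : float) -> Dict:
        return {'sla_latency': {'e2e_latency_ms': e2e_latency_ms}}

    def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool, availability : float) -> Dict:
        # field names 'all_active' and 'availability' are assumptions
        return {'sla_availability': {
            'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active, 'availability': availability}}

    def json_constraint_sla_isolation(isolation_levels : List[int]) -> Dict:
        return {'sla_isolation': {'isolation_level': isolation_levels}}
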
diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
index 775da1580a2a6521dbdc8fe32236c1f2adb4b3a7..773a37eac258f8b3c16c966464ced124d3c77c85 100644
--- a/src/load_generator/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, logging, pytz, random
+import copy, logging, pytz, random, threading
 from apscheduler.executors.pool import ThreadPoolExecutor
 from apscheduler.jobstores.memory import MemoryJobStore
 from apscheduler.schedulers.blocking import BlockingScheduler
@@ -21,6 +21,7 @@ from typing import Dict, Optional
 from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
+from .Constants import MAX_WORKER_THREADS
 from .DltTools import explore_entities_to_record, record_entities
 from .Parameters import Parameters
 from .RequestGenerator import RequestGenerator
@@ -37,7 +38,7 @@ class RequestScheduler:
         self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=10)},
+            executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
             job_defaults = {
                 'coalesce': False,
                 'max_instances': 100,
@@ -46,14 +47,18 @@ class RequestScheduler:
             timezone=pytz.utc)
         self._parameters = parameters
         self._generator = generator
+        self._running = threading.Event()
+
+    @property
+    def num_generated(self):
+        # in infinite-loop mode num_requests == 0, so capping with min() would always yield 0
+        if self.infinite_loop: return self._generator.num_generated
+        return min(self._generator.num_generated, self._parameters.num_requests)
+
+    @property
+    def infinite_loop(self): return self._generator.infinite_loop
+
+    @property
+    def running(self): return self._running.is_set()
 
     def _schedule_request_setup(self) -> None:
-        infinite_loop = self._parameters.num_requests == 0
-        num_requests_generated = self._generator.num_requests_generated - 1 # because it first increases, then checks
-        if not infinite_loop and (num_requests_generated >= self._parameters.num_requests):
-            LOGGER.info('Generation Done!')
-            #self._scheduler.shutdown()
-            return
         iat = random.expovariate(1.0 / self._parameters.inter_arrival_time)
         run_date = datetime.utcnow() + timedelta(seconds=iat)
         self._scheduler.add_job(
@@ -66,16 +71,24 @@ class RequestScheduler:
             self._request_teardown, args=(request,), trigger='date', run_date=run_date, timezone=pytz.utc)
 
     def start(self):
+        self._running.set()
         self._schedule_request_setup()
         self._scheduler.start()
 
     def stop(self):
         self._scheduler.shutdown()
+        self._running.clear()
 
     def _request_setup(self) -> None:
-        self._schedule_request_setup()
+        completed, request = self._generator.compose_request()
+        if completed:
+            # the generator already logs 'Generation Done!'; just flag the scheduler as idle
+            #self._scheduler.shutdown()
+            self._running.clear()
+            return
+        self._schedule_request_setup()
 
-        request = self._generator.compose_request()
         if request is None:
             LOGGER.warning('No resources available to compose new request')
             return
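
compose_request() now distinguishes three outcomes instead of one: generation finished, no resources for this arrival, or a request to dispatch. A sketch of the driver protocol implemented by _request_setup() above; the callback names are illustrative stand-ins for the scheduler methods:

    def setup_step(generator, schedule_next, dispatch, on_done):
        completed, request = generator.compose_request()
        if completed:
            on_done()           # (True, None): generation finished, stop re-scheduling
            return
        schedule_next()         # keep the random arrival process running
        if request is None:
            return              # (False, None): no endpoints free for this arrival
        dispatch(request)       # (False, request): hand over to the service/slice clients
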
diff --git a/src/load_generator/service/Constants.py b/src/load_generator/service/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c339877c70363e874df278d6b5d29cc47a3be0f
--- /dev/null
+++ b/src/load_generator/service/Constants.py
@@ -0,0 +1,27 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.load_generator_pb2 import RequestTypeEnum
+from load_generator.load_gen.Constants import RequestType
+
+REQUEST_TYPE_MAP = {
+    RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM : RequestType.SERVICE_L2NM,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM : RequestType.SERVICE_L3NM,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_MW   : RequestType.SERVICE_MW,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI : RequestType.SERVICE_TAPI,
+    RequestTypeEnum.REQUESTTYPE_SLICE_L2NM   : RequestType.SLICE_L2NM,
+    RequestTypeEnum.REQUESTTYPE_SLICE_L3NM   : RequestType.SLICE_L3NM,
+}
+
+REQUEST_TYPE_REVERSE_MAP = {v:k for k,v in REQUEST_TYPE_MAP.items()}
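
The two maps are exact inverses, so request types survive a round trip between the gRPC enum and the internal RequestType; a quick self-contained check:

    # Round-trip check for the enum maps defined above.
    from load_generator.service.Constants import REQUEST_TYPE_MAP, REQUEST_TYPE_REVERSE_MAP

    for grpc_enum, internal_type in REQUEST_TYPE_MAP.items():
        assert REQUEST_TYPE_REVERSE_MAP[internal_type] == grpc_enum
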
diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
index c280581ddfab488249ff249e60118ec3030e0447..d66b0b2c10c5228e0c3d15759fc46b2c0770154d 100644
--- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
+++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
@@ -12,43 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Optional
 import grpc, logging
+from typing import Optional
 from apscheduler.schedulers.background import BackgroundScheduler
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, Status
 from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer
-from load_generator.load_gen.Constants import RequestType
-from load_generator.load_gen.Parameters import Parameters
+from load_generator.load_gen.Parameters import Parameters as LoadGen_Parameters
 from load_generator.load_gen.RequestGenerator import RequestGenerator
 from load_generator.load_gen.RequestScheduler import RequestScheduler
+from .Constants import REQUEST_TYPE_MAP, REQUEST_TYPE_REVERSE_MAP
 
 LOGGER = logging.getLogger(__name__)
 
 class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
-        self._parameters = Parameters(
-            num_requests = 100,
-            request_types = [
-                RequestType.SERVICE_L2NM,
-                RequestType.SERVICE_L3NM,
-                #RequestType.SERVICE_MW,
-                #RequestType.SERVICE_TAPI,
-                RequestType.SLICE_L2NM,
-                RequestType.SLICE_L3NM,
-            ],
-            offered_load  = 50,
-            holding_time  = 10,
-            do_teardown   = True,
-            dry_mode      = False,           # in dry mode, no request is sent to TeraFlowSDN
-            record_to_dlt = False,           # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT
-            dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False
-        )
         self._generator : Optional[RequestGenerator] = None
         self._scheduler : Optional[RequestScheduler] = None
         LOGGER.debug('Servicer Created')
 
-    def Start(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+    def Start(self, request : Parameters, context : grpc.ServicerContext) -> Empty:
+        self._parameters = LoadGen_Parameters(
+            num_requests       = request.num_requests,
+            request_types      = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
+            offered_load       = request.offered_load if request.offered_load > 1.e-12 else None,
+            holding_time       = request.holding_time if request.holding_time > 1.e-12 else None,
+            inter_arrival_time = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
+            do_teardown        = request.do_teardown,   # if set, schedule tear down of requests
+            dry_mode           = request.dry_mode,      # in dry mode, no request is sent to TeraFlowSDN
+            record_to_dlt      = request.record_to_dlt, # if set, upload changes to DLT
+            dlt_domain_id      = request.dlt_domain_id, # domain used to upload entities (when record_to_dlt = True)
+        )
+
         LOGGER.info('Initializing Generator...')
         self._generator = RequestGenerator(self._parameters)
         self._generator.initialize()
@@ -58,6 +54,33 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
         self._scheduler.start()
         return Empty()
 
+    def GetStatus(self, request : Empty, context : grpc.ServicerContext) -> Status:
+        if self._scheduler is None:
+            # not started
+            status = Status()
+            status.num_generated = 0
+            status.infinite_loop = False
+            status.running       = False
+            return status
+
+        params = self._scheduler._parameters
+        request_types = [REQUEST_TYPE_REVERSE_MAP[rt] for rt in params.request_types]
+
+        status = Status()
+        status.num_generated = self._scheduler.num_generated
+        status.infinite_loop = self._scheduler.infinite_loop
+        status.running       = self._scheduler.running
+        status.parameters.num_requests       = params.num_requests          # pylint: disable=no-member
+        status.parameters.offered_load       = params.offered_load          # pylint: disable=no-member
+        status.parameters.holding_time       = params.holding_time          # pylint: disable=no-member
+        status.parameters.inter_arrival_time = params.inter_arrival_time    # pylint: disable=no-member
+        status.parameters.do_teardown        = params.do_teardown           # pylint: disable=no-member
+        status.parameters.dry_mode           = params.dry_mode              # pylint: disable=no-member
+        status.parameters.record_to_dlt      = params.record_to_dlt         # pylint: disable=no-member
+        status.parameters.dlt_domain_id      = params.dlt_domain_id         # pylint: disable=no-member
+        status.parameters.request_types.extend(request_types)               # pylint: disable=no-member
+        return status
+
     def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty:
         if self._scheduler is not None:
             self._scheduler.stop()
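
With Start() now taking Parameters and GetStatus() added, a client can configure, drive, and poll the load generator remotely. A hedged sketch; the LoadGeneratorServiceStub name follows the usual grpc codegen convention and the address/port are illustrative:

    import grpc
    from common.proto.context_pb2 import Empty
    from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
    from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub  # assumed codegen name

    channel = grpc.insecure_channel('load-generatorservice:50052')  # illustrative address
    stub = LoadGeneratorServiceStub(channel)

    params = Parameters()
    params.num_requests = 100   # 0 means infinite loop
    params.request_types.extend([
        RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM, RequestTypeEnum.REQUESTTYPE_SLICE_L3NM])
    params.offered_load = 50.0  # presumably holding_time / inter_arrival_time; unset values fall back in Start()
    params.holding_time = 10.0
    params.do_teardown  = True

    stub.Start(params)
    status = stub.GetStatus(Empty())
    print(status.num_generated, status.running)
    stub.Stop(Empty())
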
diff --git a/src/monitoring/.gitlab-ci.yml b/src/monitoring/.gitlab-ci.yml
index ff620c53425f8f447dcb00ea03bc4c9f8ce4c5e9..7c3a14975d9c7bf7d5d46be917203338bea7f1f9 100644
--- a/src/monitoring/.gitlab-ci.yml
+++ b/src/monitoring/.gitlab-ci.yml
@@ -56,7 +56,7 @@ unit_test monitoring:
     - docker pull questdb/questdb
     - docker run --name questdb -d -p 9000:9000  -p 9009:9009  -p 8812:8812  -p 9003:9003  -e QDB_CAIRO_COMMIT_LAG=1000 -e QDB_CAIRO_MAX_UNCOMMITTED_ROWS=100000 --network=teraflowbridge --rm questdb/questdb
     - sleep 10
-    - docker run --name $IMAGE_NAME -d -p 7070:7070 --env METRICSDB_HOSTNAME=questdb --env METRICSDB_ILP_PORT=9009 --env METRICSDB_REST_PORT=9000 --env METRICSDB_TABLE=monitoring -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker run --name $IMAGE_NAME -d -p 7070:7070 --env METRICSDB_HOSTNAME=questdb --env METRICSDB_ILP_PORT=9009 --env METRICSDB_REST_PORT=9000 --env METRICSDB_TABLE_MONITORING_KPIS=tfs_monitoring_kpis -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
     - sleep 30
     - docker ps -a
     - docker logs $IMAGE_NAME
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index 0bbce15094b87a17e332aad21bf34a565e8dd087..f408734df40c1bc5c16b7e108e3ce5a211165f71 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -47,7 +47,7 @@ MONITORING_INCLUDEKPI_COUNTER = Counter('monitoring_includekpi_counter', 'Monito
 METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME")
 METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT")
 METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT")
-METRICSDB_TABLE = os.environ.get("METRICSDB_TABLE")
+METRICSDB_TABLE_MONITORING_KPIS = os.environ.get("METRICSDB_TABLE_MONITORING_KPIS")
 
 class MonitoringServiceServicerImpl(MonitoringServiceServicer):
     def __init__(self, name_mapping : NameMapping):
@@ -57,7 +57,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         self.management_db = ManagementDBTools.ManagementDB('monitoring.db')
         self.deviceClient = DeviceClient()
         self.metrics_db = MetricsDBTools.MetricsDB(
-            METRICSDB_HOSTNAME, name_mapping, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
+            METRICSDB_HOSTNAME, name_mapping, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE_MONITORING_KPIS)
         self.subs_manager = SubscriptionManager(self.metrics_db)
         self.alarm_manager = AlarmManager(self.metrics_db)
         LOGGER.info('MetricsDB initialized')
@@ -592,8 +592,8 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
                 LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
                 response.kpi_id.kpi_id.uuid = "NoID"
             else:
-                query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE} WHERE kpi_id = '{kpi_id}' " \
-                        f"LATEST ON timestamp PARTITION BY kpi_id"
+                query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE_MONITORING_KPIS} " \
+                        f"WHERE kpi_id = '{kpi_id}' LATEST ON timestamp PARTITION BY kpi_id"
                 data = self.metrics_db.run_query(query)
                 LOGGER.debug(data)
                 if len(data) == 0:
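
The renamed METRICSDB_TABLE_MONITORING_KPIS feeds the same LATEST ON query; a sketch of issuing that query directly against QuestDB's REST /exec endpoint, matching the query shape above (host, port, and table values are illustrative):

    import requests

    def get_instant_kpi(host : str, rest_port : int, table : str, kpi_id : str):
        query = (
            "SELECT kpi_id, timestamp, kpi_value FROM {:s} "
            "WHERE kpi_id = '{:s}' LATEST ON timestamp PARTITION BY kpi_id"
        ).format(table, kpi_id)
        response = requests.get(
            'http://{:s}:{:d}/exec'.format(host, rest_port), params={'query': query, 'fmt': 'json'})
        response.raise_for_status()
        return response.json().get('dataset', [])

    #rows = get_instant_kpi('localhost', 9000, 'tfs_monitoring_kpis', 'some-kpi-uuid')
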
diff --git a/src/monitoring/service/__main__.py b/src/monitoring/service/__main__.py
index fc460151b370c0eb5335787ed4677f7008881ad2..14f5609602c90eb9f54462e423af100997cf00d2 100644
--- a/src/monitoring/service/__main__.py
+++ b/src/monitoring/service/__main__.py
@@ -69,6 +69,8 @@ def main():
     wait_for_environment_variables([
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT,  signal_handler)
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index 1428b0ed56dbb24a24af8fde42e4d073a48c931d..c883f9d141fc28645761641b0ccd10294b538bd2 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -75,7 +75,7 @@ os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_POR
 METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME')
 METRICSDB_ILP_PORT = os.environ.get('METRICSDB_ILP_PORT')
 METRICSDB_REST_PORT = os.environ.get('METRICSDB_REST_PORT')
-METRICSDB_TABLE = os.environ.get('METRICSDB_TABLE')
+METRICSDB_TABLE_MONITORING_KPIS = os.environ.get('METRICSDB_TABLE_MONITORING_KPIS')
 
 LOGGER = logging.getLogger(__name__)
 
@@ -193,7 +193,7 @@ def management_db():
 def metrics_db(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name
     return monitoring_service.monitoring_servicer.metrics_db
     #_metrics_db = MetricsDBTools.MetricsDB(
-    #    METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
+    #    METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE_MONITORING_KPIS)
     #return _metrics_db
 
 @pytest.fixture(scope='session')
diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
index a9fc4fa3d499f634f021d9ebbb4a749b4f8715c7..a6d39ee36949e075323613fceb71da5c77354fe5 100644
--- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py
@@ -54,13 +54,15 @@ class KDisjointPathAlgorithm(_Algorithm):
             self.services_details.setdefault(service_key, service_details)
 
             for constraint in service.service_constraints:
-                if constraint.WhichOneof('constraint') == 'custom':
+                kind = constraint.WhichOneof('constraint')
+
+                if kind == 'custom':
                     constraint_type = constraint.custom.constraint_type
                     if constraint_type not in CUSTOM_CONSTRAINTS: continue
                     constraint_value = constraint.custom.constraint_value
                     constraints[constraint_type] = constraint_value
 
-                if constraint.WhichOneof('constraint') == 'endpoint_location':
+                elif kind == 'endpoint_location':
                     endpoint_id = constraint.endpoint_location.endpoint_id
                     device_uuid = endpoint_id.device_id.device_uuid.uuid
                     device_uuid = self.device_name_mapping.get(device_uuid, device_uuid)
@@ -73,7 +75,7 @@ class KDisjointPathAlgorithm(_Algorithm):
                     site_id = constraint.endpoint_location.location.region
                     endpoints.setdefault((device_uuid, endpoint_uuid), dict())['site_id'] = site_id
 
-                if constraint.WhichOneof('constraint') == 'endpoint_priority':
+                elif kind == 'endpoint_priority':
                     endpoint_id = constraint.endpoint_priority.endpoint_id
                     device_uuid = endpoint_id.device_id.device_uuid.uuid
                     device_uuid = self.device_name_mapping.get(device_uuid, device_uuid)
@@ -82,9 +84,18 @@ class KDisjointPathAlgorithm(_Algorithm):
                     priority = constraint.endpoint_priority.priority
                     endpoints.setdefault((device_uuid, endpoint_uuid), dict())['priority'] = priority
 
+                elif kind == 'sla_capacity':
+                    capacity_gbps = constraint.sla_capacity.capacity_gbps
+                    constraints['bandwidth[gbps]'] = str(capacity_gbps)
+
+                elif kind == 'sla_latency':
+                    e2e_latency_ms = constraint.sla_latency.e2e_latency_ms
+                    constraints['latency[ms]'] = str(e2e_latency_ms)
+
             # TODO: ensure these constraints are provided in the request
             if 'bandwidth[gbps]' not in constraints: constraints['bandwidth[gbps]'] = '20.0'
             if 'latency[ms]' not in constraints: constraints['latency[ms]'] = '20.0'
+            #if 'jitter[us]' not in constraints: constraints['jitter[us]'] = '50.0'
 
     def get_link_from_endpoint(self, endpoint : Dict) -> Tuple[Dict, Link]:
         device_uuid = endpoint['device_id']
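
Condensed, the elif chain above folds SLA constraints into the same string-keyed dict the algorithm already used for custom constraints. A standalone sketch, with the CUSTOM_CONSTRAINTS filter and the endpoint_location/endpoint_priority handling omitted for brevity:

    def extract_constraints(service) -> dict:
        constraints = {}
        for constraint in service.service_constraints:
            kind = constraint.WhichOneof('constraint')
            if kind == 'custom':
                constraints[constraint.custom.constraint_type] = constraint.custom.constraint_value
            elif kind == 'sla_capacity':
                constraints['bandwidth[gbps]'] = str(constraint.sla_capacity.capacity_gbps)
            elif kind == 'sla_latency':
                constraints['latency[ms]'] = str(constraint.sla_latency.e2e_latency_ms)
        constraints.setdefault('bandwidth[gbps]', '20.0')  # TODO fallbacks from the hunk above
        constraints.setdefault('latency[ms]', '20.0')
        return constraints
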
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index bfb4da05fb57bef03fb94fc8973271ceb45f619a..ee85f0bb083500c655e78798bbcd2bd00e8a4501 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -73,17 +73,22 @@ def compose_latency_characteristics(fixed_latency_characteristic : str) -> Dict:
     return {'fixed-latency-characteristic': fixed_latency_characteristic}
 
 def compose_constraint(constraint : Constraint) -> Dict:
-    if constraint.WhichOneof('constraint') != 'custom':
-        str_constraint = grpc_message_to_json_string(constraint)
-        LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
-        return None
-    constraint_type = constraint.custom.constraint_type
-    if constraint_type in {'diversity'}:
-        str_constraint = grpc_message_to_json_string(constraint)
-        LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
-        return None
-    constraint_value = constraint.custom.constraint_value
-    return {'constraint_type': constraint_type, 'constraint_value': constraint_value}
+    kind = constraint.WhichOneof('constraint')
+    if kind == 'custom':
+        constraint_type = constraint.custom.constraint_type
+        if constraint_type in {'bandwidth[gbps]', 'latency[ms]', 'jitter[us]'}:
+            constraint_value = constraint.custom.constraint_value
+            return {'constraint_type': constraint_type, 'constraint_value': constraint_value}
+    elif kind == 'sla_capacity':
+        capacity_gbps = constraint.sla_capacity.capacity_gbps
+        return {'constraint_type': 'bandwidth[gbps]', 'constraint_value': str(capacity_gbps)}
+    elif kind == 'sla_latency':
+        e2e_latency_ms = constraint.sla_latency.e2e_latency_ms
+        return {'constraint_type': 'latency[ms]', 'constraint_value': str(e2e_latency_ms)}
+
+    str_constraint = grpc_message_to_json_string(constraint)
+    LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint))
+    return None
 
 def compose_device(grpc_device : Device) -> Dict:
     device_uuid = grpc_device.device_id.device_uuid.uuid
@@ -144,6 +149,8 @@ def compose_service(grpc_service : Service) -> Dict:
         constraints.append({'constraint_type': 'bandwidth[gbps]', 'constraint_value': '20.0'})
     if 'latency[ms]' not in constraint_types:
         constraints.append({'constraint_type': 'latency[ms]', 'constraint_value': '20.0'})
+    #if 'jitter[us]' not in constraint_types:
+    #    constraints.append({'constraint_type': 'jitter[us]', 'constraint_value': '50.0'})
 
     return {
         'serviceId': service_id,
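
Since compose_constraint() now returns None for unsupported kinds, callers are assumed to filter before serializing; a caller-side sketch:

    # Drop the unsupported constraints flagged as None by compose_constraint().
    constraints = [
        composed for composed in map(compose_constraint, grpc_service.service_constraints)
        if composed is not None
    ]
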
diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py
index ca9764a34ef0550351c4a0ebcdbd041805c49dde..f26d74ce4c665663735bae69dcfb5a4e14311bfa 100644
--- a/src/pathcomp/frontend/tests/Objects_A_B_C.py
+++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id
 from common.tools.object_factory.EndPoint import json_endpoints
@@ -97,8 +97,8 @@ LINK_C2_C3_ID, LINK_C2_C3 = compose_link(DEVICE_C2_ENDPOINTS[1], DEVICE_C3_ENDPO
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_A1_B1 = compose_service(DEVICE_A1_ENDPOINTS[2], DEVICE_B1_ENDPOINTS[2], constraints=[
-    json_constraint_custom('bandwidth[gbps]', 10.0),
-    json_constraint_custom('latency[ms]',     12.0),
+    json_constraint_sla_capacity(10.0),
+    json_constraint_sla_latency(12.0),
 ])
 
 # ----- Containers -----------------------------------------------------------------------------------------------------
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
index 1d057c10edcea30e1bf38f63d8a1ad0c6a0a4d46..9ee784e1f76026416bca9824aa8e54e2c4f874f2 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -139,8 +139,8 @@ LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
-    json_constraint_custom('bandwidth[gbps]', 10.0),
-    json_constraint_custom('latency[ms]',     20.0),
+    json_constraint_sla_capacity(10.0),
+    json_constraint_sla_latency(20.0),
 ])
 
 # ----- Containers -----------------------------------------------------------------------------------------------------
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
index 8f6e88719f4019edbeea36c7b4a641fbd7abbea4..71510d088746bd791e4671686dd5114874dd5a2a 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
@@ -14,7 +14,7 @@
 
 import uuid
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
-from common.tools.object_factory.Constraint import json_constraint_custom
+from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -149,8 +149,8 @@ LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3
 
 # ----- Service --------------------------------------------------------------------------------------------------------
 SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
-    json_constraint_custom('bandwidth[gbps]', 10.0),
-    json_constraint_custom('latency[ms]',     20.0),
+    json_constraint_sla_capacity(10.0),
+    json_constraint_sla_latency(20.0),
 ])
 
 # ----- Containers -----------------------------------------------------------------------------------------------------
diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py
index fd14c8a7aed4ec6e1a1c73aaa9425008abe7db60..8088259b80b8ade2669568b74f004dcfa631dd9c 100644
--- a/src/pathcomp/frontend/tests/test_unitary.py
+++ b/src/pathcomp/frontend/tests/test_unitary.py
@@ -18,12 +18,11 @@ from common.proto.pathcomp_pb2 import PathCompRequest
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Constraint import (
     json_constraint_custom, json_constraint_endpoint_location_region, json_constraint_endpoint_priority,
-    json_constraint_sla_availability)
+    json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_latency)
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Service import json_service_l3nm_planned
 from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 
 # Scenarios:
@@ -90,8 +89,8 @@ def test_request_service_shortestpath(
 
     request_services = copy.deepcopy(SERVICES)
     #request_services[0]['service_constraints'] = [
-    #    json_constraint_custom('bandwidth[gbps]', 1000.0),
-    #    json_constraint_custom('latency[ms]',     1200.0),
+    #    json_constraint_sla_capacity(1000.0),
+    #    json_constraint_sla_latency(1200.0),
     #]
     pathcomp_request = PathCompRequest(services=request_services)
     pathcomp_request.shortest_path.Clear()  # hack to select the shortest path algorithm that has no attributes
@@ -202,9 +201,9 @@ def test_request_service_kdisjointpath(
     ]
     
     endpoint_ids, constraints = [], [
-        json_constraint_custom('bandwidth[gbps]', 10.0),
-        json_constraint_custom('latency[ms]',     12.0),
-        json_constraint_sla_availability(2, True),
+        json_constraint_sla_capacity(10.0),
+        json_constraint_sla_latency(12.0),
+        json_constraint_sla_availability(2, True, 50.0),
         json_constraint_custom('diversity', {'end-to-end-diverse': 'all-other-accesses'}),
     ]
 
diff --git a/src/pathcomp/misc/example-results-kdisjointpaths.json b/src/pathcomp/misc/example-results-kdisjointpaths.json
index 9eda25d484e45db53471ea3f655d511cbcc42c18..c1dbf3a3c7bc6335f0d0c765b6622ce070b7774e 100644
--- a/src/pathcomp/misc/example-results-kdisjointpaths.json
+++ b/src/pathcomp/misc/example-results-kdisjointpaths.json
@@ -64,8 +64,8 @@
             ],
             "service_status": {"service_status": "SERVICESTATUS_PLANNED"},
             "service_constraints": [
-                {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}},
-                {"custom": {"constraint_type": "latency[ms]", "constraint_value": "12.0"}}
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 12.0}}
             ],
             "service_config": {"config_rules": []}
         }
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 622abeee860cdb6ce8153b7def9fb91ea1117277..0b2e0760161c109a2ba6a5feecc931e8bcf5c14f 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -38,8 +38,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def CreateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
-        LOGGER.info('[CreateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
-
         if len(request.service_endpoint_ids) > 0:
             unexpected_endpoints = []
             for service_endpoint_id in request.service_endpoint_ids:
@@ -85,8 +83,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def UpdateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
-        LOGGER.info('[UpdateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
-
         # Set service status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the service is
         # being modified.
         context_client = ContextClient()
@@ -112,27 +108,30 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
         service_id_with_uuids = context_client.SetService(service)
         service_with_uuids = context_client.GetService(service_id_with_uuids)
 
-        num_disjoint_paths = None
+        num_disjoint_paths = 0
         for constraint in request.service_constraints:
             if constraint.WhichOneof('constraint') == 'sla_availability':
                 num_disjoint_paths = constraint.sla_availability.num_disjoint_paths
                 break
 
+        num_disjoint_paths = 1 if num_disjoint_paths == 0 else num_disjoint_paths # it is an int now, never None
+        num_expected_endpoints = num_disjoint_paths * 2
+
         tasks_scheduler = TasksScheduler(self.service_handler_factory)
-        if len(service_with_uuids.service_endpoint_ids) >= (2 if num_disjoint_paths is None else 4):
+        if len(service_with_uuids.service_endpoint_ids) >= num_expected_endpoints:
             pathcomp_request = PathCompRequest()
             pathcomp_request.services.append(service_with_uuids)    # pylint: disable=no-member
 
-            if num_disjoint_paths is None:
+            if num_disjoint_paths == 1: # after the normalization above it is always >= 1
                 pathcomp_request.shortest_path.Clear()              # pylint: disable=no-member
             else:
                 pathcomp_request.k_disjoint_path.num_disjoint = num_disjoint_paths  # pylint: disable=no-member
 
-            LOGGER.info('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request)))
+            LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request)))
             pathcomp = PathCompClient()
             pathcomp_reply = pathcomp.Compute(pathcomp_request)
             pathcomp.close()
-            LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply)))
+            LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply)))
 
             # Feed TaskScheduler with this path computation reply. TaskScheduler identifies inter-dependencies among
             # the services and connections retrieved and produces a schedule of tasks (an ordered list of tasks to be
@@ -144,8 +143,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
-        LOGGER.info('[DeleteService] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
-
         context_client = ContextClient()
 
         # Set service status to "SERVICESTATUS_PENDING_REMOVAL" to ensure rest of components are aware the service is
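
The endpoint-count rule behind num_expected_endpoints in UpdateService: each disjoint path consumes an (ingress, egress) pair, so a request needs 2 * num_disjoint_paths endpoints. A tiny sketch of the normalization:

    def expected_endpoints(num_disjoint_paths : int) -> int:
        num_disjoint_paths = 1 if num_disjoint_paths == 0 else num_disjoint_paths
        return num_disjoint_paths * 2

    assert expected_endpoints(0) == 2   # no sla_availability constraint: shortest path
    assert expected_endpoints(2) == 4   # 2 disjoint paths need 4 endpoints
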
diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py
index f55527e4756022fc4941605f54ab82b74c0937f0..fbc554aa261cbc68009258d322aa01d52bfe760d 100644
--- a/src/service/service/task_scheduler/TaskScheduler.py
+++ b/src/service/service/task_scheduler/TaskScheduler.py
@@ -130,7 +130,7 @@ class TasksScheduler:
                 self._dag.add(connection_key, service_key_done)
 
         t1 = time.time()
-        LOGGER.info('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
+        LOGGER.debug('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
 
     def compose_from_service(self, service : Service, is_delete : bool = False) -> None:
         t0 = time.time()
@@ -196,11 +196,11 @@ class TasksScheduler:
                 raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item)))
 
         t1 = time.time()
-        LOGGER.info('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0))
+        LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0))
 
     def execute_all(self, dry_run : bool = False) -> None:
         ordered_task_keys = list(self._dag.static_order())
-        LOGGER.info('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))
+        LOGGER.debug('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))
 
         results = []
         for task_key in ordered_task_keys:
@@ -208,5 +208,5 @@ class TasksScheduler:
             succeeded = True if dry_run else task.execute()
             results.append(succeeded)
 
-        LOGGER.info('[execute_all] results={:s}'.format(str(results)))
+        LOGGER.debug('[execute_all] results={:s}'.format(str(results)))
         return zip(ordered_task_keys, results)
diff --git a/src/slice/client/SliceClient.py b/src/slice/client/SliceClient.py
index a3e5d649032bbf939f9ba6d812b270ca3384cc06..792a2037f0a7cb47d6f0c2e7969708425b57b3a6 100644
--- a/src/slice/client/SliceClient.py
+++ b/src/slice/client/SliceClient.py
@@ -65,3 +65,17 @@ class SliceClient:
         response = self.stub.DeleteSlice(request)
         LOGGER.debug('DeleteSlice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
+
+    @RETRY_DECORATOR
+    def OrderSliceWithSLA(self, request : Slice) -> SliceId:
+        LOGGER.debug('OrderSliceWithSLA request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.OrderSliceWithSLA(request)
+        LOGGER.debug('OrderSliceWithSLA result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RunSliceGrouping(self, request : Empty) -> Empty:
+        LOGGER.debug('RunSliceGrouping request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RunSliceGrouping(request)
+        LOGGER.debug('RunSliceGrouping result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
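
A hedged usage sketch of the two new client methods; a no-argument constructor resolving host/port from environment settings is assumed, as is a close() method, following the other TFS clients:

    from common.proto.context_pb2 import Empty, Slice
    from slice.client.SliceClient import SliceClient

    slice_client = SliceClient()   # assumed to resolve host/port from env settings
    slice_request = Slice()
    # ... fill slice_id, slice_endpoint_ids and sla_* slice_constraints here ...
    slice_id = slice_client.OrderSliceWithSLA(slice_request)
    slice_client.RunSliceGrouping(Empty())
    slice_client.close()
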
diff --git a/src/slice/requirements.in b/src/slice/requirements.in
index daef740da4729659fb3117eadff31994acdf5746..854c71a5948e91077fba4561f961083ed90b0861 100644
--- a/src/slice/requirements.in
+++ b/src/slice/requirements.in
@@ -12,5 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 #deepdiff==5.8.*
+numpy==1.23.*
+pandas==1.5.*
+questdb==1.0.1
+requests==2.27.*
+scikit-learn==1.1.*
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index 21d820089aad9531834187e129d893e90f3c93a8..acec3ae303266714ae7f50c5c0d78fc41d350ea1 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -24,10 +24,11 @@ from common.tools.grpc.ConfigRules import copy_config_rules
 from common.tools.grpc.Constraints import copy_constraints
 from common.tools.grpc.EndPointIds import copy_endpoint_ids
 from common.tools.grpc.ServiceIds import update_service_ids
-from common.tools.grpc.Tools import grpc_message_to_json_string
+#from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from interdomain.client.InterdomainClient import InterdomainClient
 from service.client.ServiceClient import ServiceClient
+from .slice_grouper.SliceGrouper import SliceGrouper
 
 LOGGER = logging.getLogger(__name__)
 
@@ -36,6 +37,7 @@ METRICS_POOL = MetricsPool('Slice', 'RPC')
 class SliceServiceServicerImpl(SliceServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
+        self._slice_grouper = SliceGrouper()
         LOGGER.debug('Servicer Created')
 
     def create_update(self, request : Slice) -> SliceId:
@@ -62,7 +64,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             # unable to identify the kind of slice; just update endpoints, constraints and config rules
             # update the slice in database, and return
             # pylint: disable=no-member
-            return context_client.SetSlice(slice_rw)
+            reply = context_client.SetSlice(slice_rw)
+            context_client.close()
+            return reply
 
         slice_with_uuids = context_client.GetSlice(slice_id_with_uuids)
 
@@ -80,8 +84,13 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             slice_active.CopyFrom(slice_)
             slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
             context_client.SetSlice(slice_active)
+            interdomain_client.close()
+            context_client.close()
             return slice_id
 
+        if self._slice_grouper.is_enabled:
+            grouped = self._slice_grouper.group(slice_with_uuids) # pylint: disable=unused-variable
+
         # Local domain slice
         service_id = ServiceId()
         # pylint: disable=no-member
@@ -109,13 +118,13 @@ class SliceServiceServicerImpl(SliceServiceServicer):
 
         service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
         for config_rule in request.slice_config.config_rules:
-            LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule)))
+            #LOGGER.debug('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule)))
             config_rule_kind = config_rule.WhichOneof('config_rule')
-            LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind)))
+            #LOGGER.debug('config_rule_kind: {:s}'.format(str(config_rule_kind)))
             if config_rule_kind != 'custom': continue
             custom = config_rule.custom
             resource_key = custom.resource_key
-            LOGGER.info('resource_key: {:s}'.format(str(resource_key)))
+            #LOGGER.debug('resource_key: {:s}'.format(str(resource_key)))
 
             # TODO: parse resource key with regular expression, e.g.:
             #    m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s)
@@ -123,21 +132,21 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             if not resource_key.endswith('/settings'): continue
 
             resource_value = json.loads(custom.resource_value)
-            LOGGER.info('resource_value: {:s}'.format(str(resource_value)))
+            #LOGGER.debug('resource_value: {:s}'.format(str(resource_value)))
 
             if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
                 if (resource_value.get('address_ip') is not None and \
                     resource_value.get('address_prefix') is not None):
                     service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
-                    LOGGER.info('is L3')
+                    #LOGGER.debug('is L3')
                 else:
                     service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
-                    LOGGER.info('is L2')
+                    #LOGGER.debug('is L2')
                 break
 
         if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
-            LOGGER.info('assume L2')
+            #LOGGER.debug('assume L2')
 
         service_client.UpdateService(service_request)
 
@@ -154,6 +163,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         slice_active.CopyFrom(slice_)
         slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
         context_client.SetSlice(slice_active)
+
+        service_client.close()
+        context_client.close()
         return slice_id
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
@@ -190,6 +202,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         try:
             _slice = context_client.GetSlice(request)
         except: # pylint: disable=bare-except
+            context_client.close()
             return Empty()
 
         if is_multi_domain(context_client, _slice.slice_endpoint_ids):
@@ -202,6 +215,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             current_slice.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member
             context_client.SetSlice(current_slice)
 
+            if self._slice_grouper.is_enabled:
+                ungrouped = self._slice_grouper.ungroup(current_slice) # pylint: disable=unused-variable
+
             service_client = ServiceClient()
             for service_id in _slice.slice_service_ids:
                 current_slice = Slice()
@@ -211,6 +227,8 @@ class SliceServiceServicerImpl(SliceServiceServicer):
                 context_client.UnsetSlice(current_slice)
 
                 service_client.DeleteService(service_id)
+            service_client.close()
 
         context_client.RemoveSlice(request)
+        context_client.close()
         return Empty()
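
The hunks above thread explicit close() calls through every early-return path; an equivalent and less error-prone shape, sketched with contextlib.closing (assuming the clients expose close(), as the patch shows):

    from contextlib import closing
    from context.client.ContextClient import ContextClient

    with closing(ContextClient()) as context_client:
        _slice = context_client.GetSlice(request)  # 'request' as in DeleteSlice above
        # ... use _slice; close() runs on every exit path, exceptions included ...
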
diff --git a/src/slice/service/slice_grouper/Constants.py b/src/slice/service/slice_grouper/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..2edd853a2202fc64f107ea8c6688d19d6ab2692e
--- /dev/null
+++ b/src/slice/service/slice_grouper/Constants.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: define by means of settings
+SLICE_GROUPS = [
+    ('bronze',   10.0,  10.0), # Bronze   (10%, 10Gb/s)
+    ('silver',   30.0,  40.0), # Silver   (30%, 40Gb/s)
+    ('gold',     70.0,  50.0), # Gold     (70%, 50Gb/s)
+    ('platinum', 99.0, 100.0), # Platinum (99%, 100Gb/s)
+]
+SLICE_GROUP_NAMES = {slice_group[0] for slice_group in SLICE_GROUPS}
diff --git a/src/slice/service/slice_grouper/MetricsExporter.py b/src/slice/service/slice_grouper/MetricsExporter.py
new file mode 100644
index 0000000000000000000000000000000000000000..3708641eef64e100fae18e875a4fbc4896357057
--- /dev/null
+++ b/src/slice/service/slice_grouper/MetricsExporter.py
@@ -0,0 +1,126 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, os, requests
+from typing import Any, Literal, Union
+from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module
+
+LOGGER = logging.getLogger(__name__)
+
+MAX_RETRIES = 10
+DELAY_RETRIES = 0.5
+
+MSG_EXPORT_EXECUTED   = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed'
+MSG_EXPORT_FAILED     = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...'
+MSG_REST_BAD_STATUS   = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}'
+MSG_REST_EXECUTED     = '[rest_request] Query({:s}) executed, result: {:s}'
+MSG_REST_FAILED       = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...'
+MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}'
+
+METRICSDB_HOSTNAME  = os.environ.get('METRICSDB_HOSTNAME')
+METRICSDB_ILP_PORT  = int(os.environ.get('METRICSDB_ILP_PORT'))
+METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT'))
+METRICSDB_TABLE_SLICE_GROUPS = os.environ.get('METRICSDB_TABLE_SLICE_GROUPS')
+
+COLORS = {
+    'platinum': '#E5E4E2',
+    'gold'    : '#FFD700',
+    'silver'  : '#808080',
+    'bronze'  : '#CD7F32',
+}
+DEFAULT_COLOR = '#000000' # black
+
+SQL_MARK_DELETED = "UPDATE {:s} SET is_deleted='true' WHERE slice_uuid='{:s}';"
+
+class MetricsExporter():
+    def create_table(self) -> None:
+        sql_query = ' '.join([
+            'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)),
+            ','.join([
+                'timestamp TIMESTAMP',
+                'slice_uuid SYMBOL',
+                'slice_group SYMBOL',
+                'slice_color SYMBOL',
+                'is_deleted SYMBOL',
+                'slice_availability DOUBLE',
+                'slice_capacity_center DOUBLE',
+                'slice_capacity DOUBLE',
+            ]),
+            ') TIMESTAMP(timestamp);'
+        ])
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS)))
+        except Exception as e:
+            LOGGER.warning('Table {:s} cannot be created. {:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e)))
+            raise
+
+    def export_point(
+        self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float,
+        is_center : bool = False
+    ) -> None:
+        dt_timestamp = datetime.datetime.utcnow()
+        slice_color = COLORS.get(slice_group, DEFAULT_COLOR)
+        symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color, is_deleted='false')
+        columns = dict(slice_availability=slice_availability)
+        columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity
+
+        for retry in range(MAX_RETRIES):
+            try:
+                with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender:
+                    sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp)
+                    sender.flush()
+                LOGGER.debug(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns)))
+                return
+            except (Exception, IngressError): # pylint: disable=broad-except
+                LOGGER.exception(MSG_EXPORT_FAILED.format(
+                    str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
+
+    def delete_point(self, slice_uuid : str) -> None:
+        sql_query = SQL_MARK_DELETED.format(str(METRICSDB_TABLE_SLICE_GROUPS), slice_uuid)
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.debug('Point {:s} deleted'.format(str(slice_uuid)))
+        except Exception as e:
+            LOGGER.warning('Point {:s} cannot be deleted. {:s}'.format(str(slice_uuid), str(e)))
+            raise
+
+    def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]:
+        url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT)
+        params = {'query': rest_query, 'fmt': 'json'}
+
+        for retry in range(MAX_RETRIES):
+            try:
+                response = requests.get(url, params=params)
+                status_code = response.status_code
+                if status_code not in {200}:
+                    str_content = response.content.decode('UTF-8')
+                    raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content))
+
+                json_response = response.json()
+                if 'ddl' in json_response:
+                    LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl'])))
+                    return True
+                elif 'dataset' in json_response:
+                    LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset'])))
+                    return json_response['dataset']
+
+            except Exception: # pylint: disable=broad-except
+                LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
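
DELAY_RETRIES is defined above but no pause is taken between attempts in either retry loop; a sketch of the presumably intended helper with a fixed delay between retries:

    import time

    MAX_RETRIES = 10
    DELAY_RETRIES = 0.5  # seconds, as defined above

    def with_retries(func):
        # Retry func() up to MAX_RETRIES times, sleeping DELAY_RETRIES between attempts.
        for retry in range(MAX_RETRIES):
            try:
                return func()
            except Exception:   # pylint: disable=broad-except
                if retry + 1 < MAX_RETRIES:
                    time.sleep(DELAY_RETRIES)
        raise Exception('Maximum number of retries achieved: {:d}'.format(MAX_RETRIES))
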
diff --git a/src/slice/service/slice_grouper/SliceGrouper.py b/src/slice/service/slice_grouper/SliceGrouper.py
new file mode 100644
index 0000000000000000000000000000000000000000..735d028993eb11e83138caebde1e32ebc830093f
--- /dev/null
+++ b/src/slice/service/slice_grouper/SliceGrouper.py
@@ -0,0 +1,94 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pandas, threading
+from typing import Dict, Optional, Tuple
+from sklearn.cluster import KMeans
+from common.proto.context_pb2 import Slice
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from .Constants import SLICE_GROUPS
+from .MetricsExporter import MetricsExporter
+from .Tools import (
+    add_slice_to_group, create_slice_groups, get_slice_grouping_parameters, is_slice_grouping_enabled,
+    remove_slice_from_group)
+
+LOGGER = logging.getLogger(__name__)
+
+class SliceGrouper:
+    def __init__(self) -> None:
+        self._lock = threading.Lock()
+        self._is_enabled = is_slice_grouping_enabled()
+        if not self._is_enabled: return
+
+        metrics_exporter = MetricsExporter()
+        metrics_exporter.create_table()
+
+        self._slice_groups = create_slice_groups(SLICE_GROUPS)
+
+        # Initialize and fit K-Means with the pre-defined clusters we want, i.e., one per slice group
+        df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps'])
+        k_means = KMeans(n_clusters=df_groups.shape[0])
+        k_means.fit(df_groups[['availability', 'capacity_gbps']])
+        df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])
+        self._k_means = k_means
+        self._df_groups = df_groups
+
+        self._group_mapping : Dict[str, Dict] = {
+            group['name']:{k:v for k,v in group.items() if k != 'name'}
+            for group in list(df_groups.to_dict('records'))
+        }
+
+        label_to_group = {}
+        for group_name,group_attrs in self._group_mapping.items():
+            label = group_attrs['label']
+            availability = group_attrs['availability']
+            capacity_gbps = group_attrs['capacity_gbps']
+            metrics_exporter.export_point(
+                group_name, group_name, availability, capacity_gbps, is_center=True)
+            label_to_group[label] = group_name
+        self._label_to_group = label_to_group
+
+    def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]:
+        with self._lock:
+            grouping_parameters = get_slice_grouping_parameters(slice_obj)
+            LOGGER.debug('[_select_group] grouping_parameters={:s}'.format(str(grouping_parameters)))
+            if grouping_parameters is None: return None
+
+            sample = pandas.DataFrame([grouping_parameters], columns=['availability', 'capacity_gbps'])
+            sample['label'] = self._k_means.predict(sample)
+            sample = sample.to_dict('records')[0]   # pylint: disable=unsubscriptable-object
+            LOGGER.debug('[_select_group] sample={:s}'.format(str(sample)))
+            label = sample['label']
+            availability = sample['availability']
+            capacity_gbps = sample['capacity_gbps']
+            group_name = self._label_to_group[label]
+            LOGGER.debug('[_select_group] group_name={:s}'.format(str(group_name)))
+            return group_name, availability, capacity_gbps
+
+    @property
+    def is_enabled(self): return self._is_enabled
+
+    def group(self, slice_obj : Slice) -> bool:
+        LOGGER.debug('[group] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
+        selected_group = self._select_group(slice_obj)
+        LOGGER.debug('[group] selected_group={:s}'.format(str(selected_group)))
+        if selected_group is None: return False
+        return add_slice_to_group(slice_obj, selected_group)
+
+    def ungroup(self, slice_obj : Slice) -> bool:
+        LOGGER.debug('[ungroup] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
+        selected_group = self._select_group(slice_obj)
+        LOGGER.debug('[ungroup] selected_group={:s}'.format(str(selected_group)))
+        if selected_group is None: return False
+        return remove_slice_from_group(slice_obj, selected_group)
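+
+# Hypothetical usage sketch (not part of this module): the Slice service could
+# keep a single SliceGrouper instance and consult it per request, e.g.:
+#
+#     slice_grouper = SliceGrouper()
+#     if slice_grouper.is_enabled and slice_grouper.group(slice_obj):
+#         pass  # slice attached to its group; skip individual provisioning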
diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca957f3c7760eb65b649d22ecb5b57dee3e08dab
--- /dev/null
+++ b/src/slice/service/slice_grouper/Tools.py
@@ -0,0 +1,177 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List, Optional, Set, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.Settings import get_setting
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum
+from common.tools.context_queries.Context import create_context
+from common.tools.context_queries.Slice import get_slice
+from context.client.ContextClient import ContextClient
+from slice.service.slice_grouper.MetricsExporter import MetricsExporter
+
+SETTING_NAME_SLICE_GROUPING = 'SLICE_GROUPING'
+TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'}
+
+NO_ISOLATION = IsolationLevelEnum.NO_ISOLATION
+
+def is_slice_grouping_enabled() -> bool:
+    is_enabled = get_setting(SETTING_NAME_SLICE_GROUPING, default=None)
+    if is_enabled is None: return False
+    str_is_enabled = str(is_enabled).upper()
+    return str_is_enabled in TRUE_VALUES
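+
+# For instance, deploying with SLICE_GROUPING=YES (or any value in TRUE_VALUES)
+# enables grouping; leaving the variable unset keeps the feature disabled.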
+
+def create_slice_group(
+    context_uuid : str, slice_name : str, capacity_gbps : float, availability : float
+) -> Slice:
+    slice_group_obj = Slice()
+    slice_group_obj.slice_id.context_id.context_uuid.uuid = context_uuid            # pylint: disable=no-member
+    slice_group_obj.slice_id.slice_uuid.uuid = slice_name                           # pylint: disable=no-member
+    slice_group_obj.name = slice_name
+    slice_group_obj.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE  # pylint: disable=no-member
+    #del slice_group_obj.slice_endpoint_ids[:] # no endpoints initially
+    #del slice_group_obj.slice_service_ids[:] # no sub-services
+    #del slice_group_obj.slice_subslice_ids[:] # no sub-slices
+    #del slice_group_obj.slice_config.config_rules[:] # no config rules
+    slice_group_obj.slice_owner.owner_uuid.uuid = 'TeraFlowSDN'                     # pylint: disable=no-member
+    slice_group_obj.slice_owner.owner_string = 'TeraFlowSDN'                        # pylint: disable=no-member
+
+    constraint_sla_capacity = slice_group_obj.slice_constraints.add()               # pylint: disable=no-member
+    constraint_sla_capacity.sla_capacity.capacity_gbps = capacity_gbps
+
+    constraint_sla_availability = slice_group_obj.slice_constraints.add()           # pylint: disable=no-member
+    constraint_sla_availability.sla_availability.num_disjoint_paths = 1
+    constraint_sla_availability.sla_availability.all_active = True
+    constraint_sla_availability.sla_availability.availability = availability
+
+    constraint_sla_isolation = slice_group_obj.slice_constraints.add()              # pylint: disable=no-member
+    constraint_sla_isolation.sla_isolation.isolation_level.append(NO_ISOLATION)
+
+    return slice_group_obj
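+
+# Illustrative call (hypothetical values):
+#     create_slice_group(DEFAULT_CONTEXT_NAME, 'gold', 50.0, 99.0)
+# builds an ACTIVE slice named 'gold' owned by 'TeraFlowSDN' with SLA
+# constraints of 50 Gb/s capacity and 99% availability over one path; note the
+# signature takes capacity_gbps before availability.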
+
+def create_slice_groups(
+    slice_groups : List[Tuple[str, float, float]], context_uuid : str = DEFAULT_CONTEXT_NAME
+) -> Dict[str, SliceId]:
+    context_client = ContextClient()
+    create_context(context_client, context_uuid)
+
+    slice_group_ids : Dict[str, SliceId] = dict()
+    for slice_group in slice_groups:
+        slice_group_name = slice_group[0]
+        slice_group_obj = get_slice(context_client, slice_group_name, context_uuid)
+        if slice_group_obj is None:
+            # Tuples are (name, availability, capacity_gbps); note that
+            # create_slice_group takes capacity before availability.
+            slice_group_obj = create_slice_group(
+                context_uuid, slice_group_name, slice_group[2], slice_group[1])
+            slice_group_id = context_client.SetSlice(slice_group_obj)
+            slice_group_ids[slice_group_name] = slice_group_id
+        else:
+            slice_group_ids[slice_group_name] = slice_group_obj.slice_id
+
+    return slice_group_ids
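+
+# Note: the lookup-or-create behavior above makes this function idempotent;
+# re-running it against a populated Context reuses the existing groups' SliceIds.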
+
+def get_slice_grouping_parameters(slice_obj : Slice) -> Optional[Tuple[float, float]]:
+    isolation_levels : Set[int] = set()
+    availability : Optional[float] = None
+    capacity_gbps : Optional[float] = None
+
+    for constraint in slice_obj.slice_constraints:
+        kind = constraint.WhichOneof('constraint')
+        if kind == 'sla_isolation':
+            isolation_levels.update(constraint.sla_isolation.isolation_level)
+        elif kind == 'sla_capacity':
+            capacity_gbps = constraint.sla_capacity.capacity_gbps
+        elif kind == 'sla_availability':
+            availability = constraint.sla_availability.availability
+        else:
+            continue
+
+    no_isolation_level = len(isolation_levels) == 0
+    single_isolation_level = len(isolation_levels) == 1
+    has_no_isolation_level = NO_ISOLATION in isolation_levels
+    can_be_grouped = no_isolation_level or (single_isolation_level and has_no_isolation_level)
+    if not can_be_grouped: return None
+    if availability is None: return None
+    if capacity_gbps is None: return None
+    return availability, capacity_gbps
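+
+# Example (hypothetical values): a slice constrained to sla_availability=99.0
+# and sla_capacity=10.0, with no isolation constraint or only NO_ISOLATION,
+# yields (99.0, 10.0); any other isolation level yields None.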
+
+def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool:
+    group_name, availability, capacity_gbps = selected_group
+    slice_uuid = slice_obj.slice_id.slice_uuid.uuid
+
+    context_client = ContextClient()
+    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    if slice_group_obj is None:
+        raise NotFoundException('Slice', group_name, extra_details='while adding to group')
+
+    del slice_group_obj.slice_endpoint_ids[:]
+    for endpoint_id in slice_obj.slice_endpoint_ids:
+        slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id)
+
+    del slice_group_obj.slice_constraints[:]
+    del slice_group_obj.slice_service_ids[:]
+
+    del slice_group_obj.slice_subslice_ids[:]
+    slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)
+
+    del slice_group_obj.slice_config.config_rules[:]
+    for config_rule in slice_obj.slice_config.config_rules:
+        group_config_rule = slice_group_obj.slice_config.config_rules.add()
+        group_config_rule.CopyFrom(config_rule)
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        TEMPLATE = '/subslice[{:s}]{:s}'
+        slice_resource_key = config_rule.custom.resource_key
+        group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key)
+        group_config_rule.custom.resource_key = group_resource_key
+
+    context_client.SetSlice(slice_group_obj)
+
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.export_point(
+        slice_uuid, group_name, availability, capacity_gbps, is_center=False)
+
+    return True
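+
+# For instance, a subslice config rule keyed '/settings' is stored in the group
+# as '/subslice[<slice_uuid>]/settings', keeping per-subslice rules
+# distinguishable after they are merged into the group slice.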
+
+def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool:
+    group_name, _, _ = selected_group
+    slice_uuid = slice_obj.slice_id.slice_uuid.uuid
+
+    context_client = ContextClient()
+    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    if slice_group_obj is None:
+        raise NotFoundException('Slice', group_name, extra_details='while removing from group')
+
+    if slice_obj.slice_id in slice_group_obj.slice_subslice_ids:
+        tmp_slice_group_obj = Slice()
+        tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id)             # pylint: disable=no-member
+
+        tmp_slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)   # pylint: disable=no-member
+
+        for endpoint_id in slice_obj.slice_endpoint_ids:
+            tmp_slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id)      # pylint: disable=no-member
+
+        for config_rule in slice_obj.slice_config.config_rules:
+            group_config_rule = tmp_slice_group_obj.slice_config.config_rules.add() # pylint: disable=no-member
+            group_config_rule.CopyFrom(config_rule)
+            if group_config_rule.WhichOneof('config_rule') != 'custom': continue
+            TEMPLATE = '/subslice[{:s}]{:s}'
+            slice_resource_key = group_config_rule.custom.resource_key
+            group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key)
+            group_config_rule.custom.resource_key = group_resource_key
+
+        context_client.UnsetSlice(tmp_slice_group_obj)
+
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.delete_point(slice_uuid)
+    return True
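+
+# Note: UnsetSlice receives a temporary Slice carrying only the items to detach
+# (subslice id, endpoint ids, prefixed config rules), so the group slice itself
+# survives with its remaining members; the point is then removed from metrics.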
diff --git a/src/slice/service/slice_grouper/__init__.py b/src/slice/service/slice_grouper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/slice/service/slice_grouper/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/slice/tests/old/Main.py b/src/slice/tests/old/Main.py
new file mode 100644
index 0000000000000000000000000000000000000000..0924f1c646e9722bf23354d0787786375663e85f
--- /dev/null
+++ b/src/slice/tests/old/Main.py
@@ -0,0 +1,98 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, pandas, random, sys, time
+#from matplotlib import pyplot as plt
+from sklearn.cluster import KMeans
+from typing import Dict, List, Tuple
+
+os.environ['METRICSDB_HOSTNAME' ] = '127.0.0.1' #'questdb-public.qdb.svc.cluster.local'
+os.environ['METRICSDB_ILP_PORT' ] = '9009'
+os.environ['METRICSDB_REST_PORT'] = '9000'
+
+from .MetricsExporter import MetricsExporter # pylint: disable=wrong-import-position
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER : logging.Logger = logging.getLogger(__name__)
+
+def get_random_slices(count : int) -> List[Tuple[str, float, float]]:
+    slices = list()
+    for i in range(count):
+        slice_name          = 'slice-{:03d}'.format(i)
+        slice_availability  = random.uniform(0.0, 99.99)
+        slice_capacity_gbps = random.uniform(0.1, 100.0)
+        slices.append((slice_name, slice_availability, slice_capacity_gbps))
+    return slices
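+
+# Each generated tuple resembles ('slice-007', 42.31, 73.94) (random values).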
+
+def init_kmeans() -> Tuple[KMeans, Dict[str, int]]:
+    groups = [
+        # Name, avail[0..100], bw_gbps[0..100]
+        ('bronze',   10.0,  10.0), # ('silver',   25.0,  25.0),
+        ('silver',   30.0,  40.0), # ('silver',   25.0,  25.0),
+        ('gold',     70.0,  50.0), # ('gold',     90.0,  50.0),
+        ('platinum', 99.0, 100.0),
+    ]
+    df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity'])
+
+    num_clusters = len(groups)
+    k_means = KMeans(n_clusters=num_clusters)
+    k_means.fit(df_groups[['availability', 'capacity']])
+
+    df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']])
+    mapping = {
+        group['name']:{k:v for k,v in group.items() if k != 'name'}
+        for group in list(df_groups.to_dict('records'))
+    }
+
+    return k_means, mapping
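+
+# The returned mapping resembles (labels vary per run, as K-Means assigns them
+# arbitrarily): {'bronze': {'availability': 10.0, 'capacity': 10.0, 'label': 3}, ...}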
+
+def main():
+    LOGGER.info('Starting...')
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.create_table()
+
+    k_means, mapping = init_kmeans()
+    label_to_group = {}
+    for group_name,group_attrs in mapping.items():
+        label = group_attrs['label']
+        availability = group_attrs['availability']
+        capacity = group_attrs['capacity']
+        metrics_exporter.export_point(group_name, group_name, availability, capacity, is_center=True)
+        label_to_group[label] = group_name
+
+    slices = get_random_slices(10000)
+    for slice_ in slices:
+        sample = pandas.DataFrame([slice_[1:3]], columns=['availability', 'capacity'])
+        sample['label'] = k_means.predict(sample)
+        sample = sample.to_dict('records')[0]
+        label = sample['label']
+        availability = sample['availability']
+        capacity = sample['capacity']
+        group_name = label_to_group[label]
+        metrics_exporter.export_point(slice_[0], group_name, availability, capacity, is_center=False)
+        time.sleep(0.01)
+
+    #df_silver   = df_slices[df_slices['group']==mapping['silver']]
+    #df_gold     = df_slices[df_slices['group']==mapping['gold']]
+    #df_platinum = df_slices[df_slices['group']==mapping['platinum']]
+    #plt.scatter(df_silver.availability,         df_silver.capacity,             s=25,  c='black' )
+    #plt.scatter(df_gold.availability,           df_gold.capacity,               s=25,  c='gold'  )
+    #plt.scatter(df_platinum.availability,       df_platinum.capacity,           s=25,  c='silver')
+    #plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red'   )
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/slice/tests/old/MetricsExporter.py b/src/slice/tests/old/MetricsExporter.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c04cb9fcb1c7ab05c5274fb8e2a934a39b4cfdd
--- /dev/null
+++ b/src/slice/tests/old/MetricsExporter.py
@@ -0,0 +1,116 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, os, requests
+from typing import Any, Literal, Union
+from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module
+
+LOGGER = logging.getLogger(__name__)
+
+MAX_RETRIES = 10
+DELAY_RETRIES = 0.5
+
+MSG_EXPORT_EXECUTED   = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed'
+MSG_EXPORT_FAILED     = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...'
+MSG_REST_BAD_STATUS   = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}'
+MSG_REST_EXECUTED     = '[rest_request] Query({:s}) executed, result: {:s}'
+MSG_REST_FAILED       = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...'
+MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}'
+
+METRICSDB_HOSTNAME  = os.environ.get('METRICSDB_HOSTNAME')
+METRICSDB_ILP_PORT  = int(os.environ.get('METRICSDB_ILP_PORT'))
+METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT'))
+METRICSDB_TABLE_SLICE_GROUPS = 'slice_groups'
+
+COLORS = {
+    'platinum': '#E5E4E2',
+    'gold'    : '#FFD700',
+    'silver'  : '#808080',
+    'bronze'  : '#CD7F32',
+}
+DEFAULT_COLOR = '#000000' # black
+
+class MetricsExporter():
+    def __init__(self) -> None:
+        pass
+
+    def create_table(self) -> None:
+        sql_query = ' '.join([
+            'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)),
+            ','.join([
+                'timestamp TIMESTAMP',
+                'slice_uuid SYMBOL',
+                'slice_group SYMBOL',
+                'slice_color SYMBOL',
+                'slice_availability DOUBLE',
+                'slice_capacity_center DOUBLE',
+                'slice_capacity DOUBLE',
+            ]),
+            ') TIMESTAMP(timestamp);'
+        ])
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS)))
+        except Exception as e:
+            LOGGER.warning('Table {:s} cannot be created. {:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e)))
+            raise
+
+    def export_point(
+        self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float,
+        is_center : bool = False
+    ) -> None:
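+        # Writes one ILP row to the slice_groups table: symbols slice_uuid/
+        # slice_group/slice_color plus either slice_capacity_center (cluster
+        # centers) or slice_capacity (member slices), so dashboards can style
+        # centers and members differently.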
+        dt_timestamp = datetime.datetime.utcnow()
+        slice_color = COLORS.get(slice_group, DEFAULT_COLOR)
+        symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color)
+        columns = dict(slice_availability=slice_availability)
+        columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity
+
+        for retry in range(MAX_RETRIES):
+            try:
+                with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender:
+                    sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp)
+                    sender.flush()
+                LOGGER.info(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns)))
+                return
+            except (Exception, IngressError): # pylint: disable=broad-except
+                LOGGER.exception(MSG_EXPORT_FAILED.format(
+                    str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
+
+    def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]:
+        url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT)
+        params = {'query': rest_query, 'fmt': 'json'}
+
+        for retry in range(MAX_RETRIES):
+            try:
+                response = requests.get(url, params=params)
+                status_code = response.status_code
+                if status_code not in {200}:
+                    str_content = response.content.decode('UTF-8')
+                    raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content))
+
+                json_response = response.json()
+                if 'ddl' in json_response:
+                    LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl'])))
+                    return True
+                elif 'dataset' in json_response:
+                    LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset'])))
+                    return json_response['dataset']
+
+            except Exception: # pylint: disable=broad-except
+                LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
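+
+# Sketch of intended use (assumed, mirroring the slice_grouper version):
+#     exporter = MetricsExporter()
+#     exporter.create_table()                              # DDL via REST /exec
+#     exporter.export_point('slice-001', 'gold', 99.0, 50.0, is_center=False)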
diff --git a/src/slice/tests/old/test_kmeans.py b/src/slice/tests/old/test_kmeans.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f54621c57c3bfcc1741591e5d0a87781e640420
--- /dev/null
+++ b/src/slice/tests/old/test_kmeans.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import pandas, random, sys
+from matplotlib import pyplot as plt
+from sklearn.cluster import KMeans
+from typing import Dict, List, Tuple
+
+def get_random_slices(count : int) -> List[Tuple[str, float, float]]:
+    slices = list()
+    for i in range(count):
+        slice_name          = 'slice-{:03d}'.format(i)
+        slice_availability  = random.uniform(0.0, 99.99)
+        slice_capacity_gbps = random.uniform(0.1, 100.0)
+        slices.append((slice_name, slice_availability, slice_capacity_gbps))
+    return slices
+
+def init_kmeans() -> Tuple[KMeans, Dict[str, int]]:
+    groups = [
+        # Name, avail[0..100], bw_gbps[0..100]
+        ('silver',   25.0,  50.0), # ('silver',   25.0,  25.0),
+        ('gold',     90.0,  10.0), # ('gold',     90.0,  50.0),
+        ('platinum', 99.0, 100.0),
+    ]
+    df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity'])
+
+    num_clusters = len(groups)
+    k_means = KMeans(n_clusters=num_clusters)
+    k_means.fit(df_groups[['availability', 'capacity']])
+
+    df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']])
+    mapping = {group['name']:group['label'] for group in list(df_groups.to_dict('records'))}
+
+    return k_means, mapping
+
+def main():
+    k_means, mapping = init_kmeans()
+    slices = get_random_slices(500)
+    df_slices = pandas.DataFrame(slices, columns=['slice_uuid', 'availability', 'capacity'])
+
+    # predict one
+    #sample = df_slices[['availability', 'capacity']].iloc[[0]]
+    #y_predicted = k_means.predict(sample)
+    #y_predicted
+
+    df_slices['group'] = k_means.predict(df_slices[['availability', 'capacity']])
+
+    df_silver   = df_slices[df_slices['group']==mapping['silver']]
+    df_gold     = df_slices[df_slices['group']==mapping['gold']]
+    df_platinum = df_slices[df_slices['group']==mapping['platinum']]
+
+    plt.scatter(df_silver.availability,         df_silver.capacity,             s=25,  c='black' )
+    plt.scatter(df_gold.availability,           df_gold.capacity,               s=25,  c='gold'  )
+    plt.scatter(df_platinum.availability,       df_platinum.capacity,           s=25,  c='silver')
+    plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red'   )
+    plt.xlabel('service-slo-availability')
+    plt.ylabel('service-slo-one-way-bandwidth')
+    #ax = plt.subplot(1, 1, 1)
+    #ax.set_ylim(bottom=0., top=1.)
+    #ax.set_xlim(left=0.)
+    plt.savefig('slice_grouping.png')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/slice/tests/old/test_subslices.py b/src/slice/tests/old/test_subslices.py
new file mode 100644
index 0000000000000000000000000000000000000000..39ee235df0e9d263244fa14436f609397bcea84f
--- /dev/null
+++ b/src/slice/tests/old/test_subslices.py
@@ -0,0 +1,96 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sqlalchemy, sys
+from sqlalchemy import Column, ForeignKey, String, event, insert
+from sqlalchemy.orm import Session, declarative_base, relationship
+from typing import Dict
+
+def _fk_pragma_on_connect(dbapi_con, con_record):
+    dbapi_con.execute('pragma foreign_keys=ON')
+
+_Base = declarative_base()
+
+class SliceModel(_Base):
+    __tablename__ = 'slice'
+
+    slice_uuid = Column(String, primary_key=True)
+
+    slice_subslices = relationship(
+        'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid')
+
+    def dump_id(self) -> Dict:
+        return {'uuid': self.slice_uuid}
+
+    def dump(self) -> Dict:
+        return {
+            'slice_id': self.dump_id(),
+            'slice_subslice_ids': [
+                slice_subslice.subslice.dump_id()
+                for slice_subslice in self.slice_subslices
+            ]
+        }
+
+class SliceSubSliceModel(_Base):
+    __tablename__ = 'slice_subslice'
+
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
+    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    slice    = relationship('SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
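+
+# SliceSubSliceModel is a self-referential association table: both columns
+# reference slice.slice_uuid, so a slice can aggregate other slices as
+# subslices, while the CASCADE/RESTRICT rules keep deletions consistent.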
+
+def main():
+    engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=False, future=True)
+    event.listen(engine, 'connect', _fk_pragma_on_connect)
+
+    _Base.metadata.create_all(engine)
+
+    slice_data = [
+        {'slice_uuid': 'slice-01'},
+        {'slice_uuid': 'slice-01-01'},
+        {'slice_uuid': 'slice-01-02'},
+    ]
+
+    slice_subslices_data = [
+        {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-01'},
+        {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-02'},
+    ]
+
+    # insert
+    with engine.connect() as conn:
+        conn.execute(insert(SliceModel).values(slice_data))
+        conn.execute(insert(SliceSubSliceModel).values(slice_subslices_data))
+        conn.commit()
+
+    # read
+    with Session(engine) as session:
+        obj_list = session.query(SliceModel).all()
+        print([obj.dump() for obj in obj_list])
+        session.commit()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
+
+# Expected output of the read step above:
+# [
+#     {'slice_id': {'uuid': 'slice-01'}, 'slice_subslice_ids': [
+#         {'uuid': 'slice-01-01'},
+#         {'uuid': 'slice-01-02'}
+#     ]},
+#     {'slice_id': {'uuid': 'slice-01-01'}, 'slice_subslice_ids': []},
+#     {'slice_id': {'uuid': 'slice-01-02'}, 'slice_subslice_ids': []}
+# ]
diff --git a/src/tests/benchmark/policy/deploy_specs.sh b/src/tests/benchmark/policy/deploy_specs.sh
index 12a45ef92a538ff48682fe45172a27d77b2800a0..7d408f003ce411566b9bf2435d89c72ff5db1459 100755
--- a/src/tests/benchmark/policy/deploy_specs.sh
+++ b/src/tests/benchmark/policy/deploy_specs.sh
@@ -4,7 +4,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,21 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-# Supported components are:
-#   context device automation policy service compute monitoring webui
-#   interdomain slice pathcomp dlt
-#   dbscanserving opticalattackmitigator opticalattackdetector
-#   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
 export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
@@ -35,6 +33,60 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
-export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details.
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS=""
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
diff --git a/src/tests/benchmark/policy/tests/test_functional_bootstrap.py b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py
index 65c46b4eb5aea8d5762484d1558c14745acf83ed..ca1882aaa22ff1ac20d0b1927199a6594a6c441a 100644
--- a/src/tests/benchmark/policy/tests/test_functional_bootstrap.py
+++ b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py
@@ -13,10 +13,10 @@
 # limitations under the License.
 
 import logging, time
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import ContextId, Empty
 from common.proto.monitoring_pb2 import KpiDescriptorList
-from common.tests.LoadScenario import load_scenario_from_descriptor
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
@@ -27,44 +27,25 @@ LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
 DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 def test_scenario_bootstrap(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
     device_client : DeviceClient,   # pylint: disable=redefined-outer-name
 ) -> None:
-    # ----- List entities - Ensure database is empty -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == 0
+    validate_empty_scenario(context_client)
 
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
-
-
-    # ----- Load Scenario ----------------------------------------------------------------------------------------------
-    descriptor_loader = load_scenario_from_descriptor(
-        DESCRIPTOR_FILE, context_client, device_client, None, None)
-
-
-    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
 
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
 
-    for context_uuid, _ in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
 
 def test_scenario_kpis_created(
     context_client : ContextClient,         # pylint: disable=redefined-outer-name
diff --git a/src/tests/benchmark/policy/tests/test_functional_cleanup.py b/src/tests/benchmark/policy/tests/test_functional_cleanup.py
index e00c5ceeea6c59bf11bd2961802a9a3b805c5d2c..122526840796310519f8fe0feb8921e51467b21f 100644
--- a/src/tests/benchmark/policy/tests/test_functional_cleanup.py
+++ b/src/tests/benchmark/policy/tests/test_functional_cleanup.py
@@ -13,9 +13,10 @@
 # limitations under the License.
 
 import logging
-from common.tools.descriptor.Loader import DescriptorLoader
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
-from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from tests.Fixtures import context_client, device_client    # pylint: disable=unused-import
@@ -24,57 +25,20 @@ LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
 DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
-
-def test_services_removed(
+def test_scenario_cleanup(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
     device_client : DeviceClient,   # pylint: disable=redefined-outer-name
 ) -> None:
-    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, _ in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
-
-
-    # ----- Delete Links, Devices, Topologies, Contexts ----------------------------------------------------------------
-    for link in descriptor_loader.links:
-        context_client.RemoveLink(LinkId(**link['link_id']))
-
-    for device in descriptor_loader.devices:
-        device_client .DeleteDevice(DeviceId(**device['device_id']))
-
-    for context_uuid, topology_list in descriptor_loader.topologies.items():
-        for topology in topology_list:
-            context_client.RemoveTopology(TopologyId(**topology['topology_id']))
-
-    for context in descriptor_loader.contexts:
-        context_client.RemoveContext(ContextId(**context['context_id']))
-
-
-    # ----- List entities - Ensure database is empty again -------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == 0
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
diff --git a/src/tests/benchmark/policy/tests/test_functional_create_service.py b/src/tests/benchmark/policy/tests/test_functional_create_service.py
index 919f81979305831b69a82f13fbe4b70bd20ea70f..dd7761f3871db48752f313dc53e8b7d2e2c38489 100644
--- a/src/tests/benchmark/policy/tests/test_functional_create_service.py
+++ b/src/tests/benchmark/policy/tests/test_functional_create_service.py
@@ -13,83 +13,61 @@
 # limitations under the License.
 
 import logging, random
-from common.DeviceTypes import DeviceTypeEnum
-from common.proto.context_pb2 import ContextId, Empty
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.tools.descriptor.Loader import DescriptorLoader
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from monitoring.client.MonitoringClient import MonitoringClient
-from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import
+from tests.Fixtures import context_client, monitoring_client                    # pylint: disable=unused-import
 from tests.tools.mock_osm.MockOSM import MockOSM
-from .Fixtures import osm_wim # pylint: disable=unused-import
+from .Fixtures import osm_wim                                                   # pylint: disable=unused-import
 from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
-DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
-
 DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, num_services in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client)
+    descriptor_loader.validate()
 
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
 
-    # ----- Create Service ---------------------------------------------------------------------------------------------
+    # Create Connectivity Service
     service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
     osm_wim.get_connectivity_service_status(service_uuid)
 
-
-    # ----- List entities - Ensure service is created ------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, num_services in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-        assert len(response.services) == 2*num_services # OLS & L3NM => (L3NM + TAPI)
-
-        for service in response.services:
-            service_id = service.service_id
-            response = context_client.ListConnections(service_id)
-            LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-                grpc_message_to_json_string(service_id), len(response.connections),
-                grpc_message_to_json_string(response)))
-            assert len(response.connections) == 1 # one connection per service
+    # Ensure slices and services are created
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 1 # OSM slice
+
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    assert len(response.services) == 2 # 1xL3NM + 1xTAPI
+
+    for service in response.services:
+        service_id = service.service_id
+        response = context_client.ListConnections(service_id)
+        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
+
+        if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
+            assert len(response.connections) == 1 # 1 connection per service
+        elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+            assert len(response.connections) == 1 # 1 connection per service
+        else:
+            str_service = grpc_message_to_json_string(service)
+            raise Exception('Unexpected ServiceType: {:s}'.format(str_service))
 
 
 def test_scenario_kpi_values_created(
diff --git a/src/tests/benchmark/policy/tests/test_functional_delete_service.py b/src/tests/benchmark/policy/tests/test_functional_delete_service.py
index 6f6ca602980fb05ffafd17f44a5bc64671c4c7b0..4fffc115e6c0ea881dea637dd741f99715d28c6a 100644
--- a/src/tests/benchmark/policy/tests/test_functional_delete_service.py
+++ b/src/tests/benchmark/policy/tests/test_functional_delete_service.py
@@ -14,86 +14,61 @@
 
 import logging
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.DeviceTypes import DeviceTypeEnum
-from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.proto.context_pb2 import ContextId, ServiceTypeEnum
 from common.tools.descriptor.Loader import DescriptorLoader
-from common.tools.object_factory.Context import json_context_id
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from tests.Fixtures import context_client   # pylint: disable=unused-import
 from tests.tools.mock_osm.MockOSM import MockOSM
-from .Fixtures import osm_wim # pylint: disable=unused-import
-
+from .Fixtures import osm_wim               # pylint: disable=unused-import
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
-DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
-
 DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
-
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure service is created ------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
+    # Ensure slices and services are created
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 1 # OSM slice
 
-    descriptor_loader = DescriptorLoader(descriptors)
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    assert len(response.services) == 2 # 1xL3NM + 1xTAPI
 
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    l3nm_service_uuids = set()
-    response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)))
-    assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI)
+    service_uuids = set()
     for service in response.services:
         service_id = service.service_id
+        response = context_client.ListConnections(service_id)
+        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
 
         if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
+            assert len(response.connections) == 1 # 1 connection per service
             service_uuid = service_id.service_uuid.uuid
-            l3nm_service_uuids.add(service_uuid)
+            service_uuids.add(service_uuid)
             osm_wim.conn_info[service_uuid] = {}
-
-        response = context_client.ListConnections(service_id)
-        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-            grpc_message_to_json_string(service_id), len(response.connections),
-            grpc_message_to_json_string(response)))
-        assert len(response.connections) == 1 # one connection per service
+        elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+            assert len(response.connections) == 1 # 1 connection per service
+        else:
+            str_service = grpc_message_to_json_string(service)
+            raise Exception('Unexpected ServiceType: {:s}'.format(str_service))
 
     # Identify service to delete
-    assert len(l3nm_service_uuids) == 1  # assume a single L3NM service has been created
-    l3nm_service_uuid = set(l3nm_service_uuids).pop()
-
-
-    # ----- Delete Service ---------------------------------------------------------------------------------------------
-    osm_wim.delete_connectivity_service(l3nm_service_uuid)
-
-
-    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
+    assert len(service_uuids) == 1  # assume a single L3NM service has been created
+    service_uuid = set(service_uuids).pop()
 
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
+    # Delete Connectivity Service
+    osm_wim.delete_connectivity_service(service_uuid)
 
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
 
-    for context_uuid, num_services in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client)
+    descriptor_loader.validate()
diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh
index 874774e1ca50830832e842e49b6fff1114cb85d8..6c3d9db662a8232f1fcccf3835b98d69571b6337 100755
--- a/src/tests/ecoc22/deploy_specs.sh
+++ b/src/tests/ecoc22/deploy_specs.sh
@@ -20,7 +20,6 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator"
 export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
@@ -57,7 +56,7 @@ export CRDB_DATABASE="tfs"
 # See ./deploy/all.sh or ./deploy/crdb.sh for additional details
 export CRDB_DEPLOY_MODE="single"
 
-# Disable flag for dropping database, if exists.
+# Disable flag for dropping database, if it exists.
 export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
@@ -75,20 +74,20 @@ export NATS_REDEPLOY=""
 
 # ----- QuestDB ----------------------------------------------------------------
 
-# If not already set, set the namespace where QuestDB will be deployed.
+# Set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE="qdb"
 
-# If not already set, set the database username to be used by Monitoring.
+# Set the database username to be used for QuestDB.
 export QDB_USERNAME="admin"
 
-# If not already set, set the database user's password to be used by Monitoring.
+# Set the database user's password to be used for QuestDB.
 export QDB_PASSWORD="quest"
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE="tfs_monitoring"
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
 
-## If not already set, disable flag for dropping table if exists.
-#export QDB_DROP_TABLE_IF_EXISTS=""
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
 
-# If not already set, disable flag for re-deploying QuestDB from scratch.
+# Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py
index 3b7b5009c0dbe9d95b4ee8e2cdbe33d39008a7a1..05691d0b274df019a87bd870fec2b9ffa3245612 100644
--- a/src/tests/ecoc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py
@@ -14,8 +14,8 @@
 
 import logging
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, Empty
-from common.tests.LoadScenario import load_scenario_from_descriptor
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
@@ -31,45 +31,15 @@ def test_scenario_bootstrap(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
     device_client : DeviceClient,   # pylint: disable=redefined-outer-name
 ) -> None:
-    # ----- List entities - Ensure database is empty -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == 0
+    validate_empty_scenario(context_client)
 
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
 
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
-
-
-    # ----- Load Scenario ----------------------------------------------------------------------------------------------
-    descriptor_loader = load_scenario_from_descriptor(
-        DESCRIPTOR_FILE, context_client, device_client, None, None)
-
-
-    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, _ in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
-
-    for context_uuid, _ in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == 0
-
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
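
The refactor above replaces dozens of per-entity listing assertions with the DescriptorLoader lifecycle. A condensed sketch of that lifecycle, using only names imported in this patch (context_client, device_client, and DESCRIPTOR_FILE are assumed to come from the test fixtures, as above):

    from common.tools.descriptor.Loader import (
        DescriptorLoader, check_descriptor_load_results, validate_empty_scenario)

    validate_empty_scenario(context_client)          # database starts empty
    loader = DescriptorLoader(
        descriptors_file=DESCRIPTOR_FILE, context_client=context_client,
        device_client=device_client)
    results = loader.process()                       # create the entities described in the file
    check_descriptor_load_results(results, loader)   # every creation succeeded
    loader.validate()                                # database contents match the descriptors
    # The cleanup tests later mirror this:
    loader.unload()                                  # remove everything the loader created
    validate_empty_scenario(context_client)          # database is empty again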
diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py
index 3e8b5ea65fe8249102ba17b9d4ce3f2cf2296dda..088c19799615169bf8c60ae5a9226fe02ec0e4ff 100644
--- a/src/tests/ecoc22/tests/test_functional_cleanup.py
+++ b/src/tests/ecoc22/tests/test_functional_cleanup.py
@@ -14,8 +14,8 @@
 
 import logging
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
-from common.tools.descriptor.Loader import DescriptorLoader
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
@@ -27,64 +27,18 @@ LOGGER.setLevel(logging.DEBUG)
 DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json'
 ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
-def test_services_removed(
+def test_scenario_cleanup(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
     device_client : DeviceClient,   # pylint: disable=redefined-outer-name
 ) -> None:
-    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, _ in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
-
-    for context_uuid, _ in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == 0
-
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
 
-
-    # ----- Delete Links, Devices, Topologies, Contexts ----------------------------------------------------------------
-    for link in descriptor_loader.links:
-        context_client.RemoveLink(LinkId(**link['link_id']))
-
-    for device in descriptor_loader.devices:
-        device_client .DeleteDevice(DeviceId(**device['device_id']))
-
-    for context_uuid, topology_list in descriptor_loader.topologies.items():
-        for topology in topology_list:
-            context_client.RemoveTopology(TopologyId(**topology['topology_id']))
-
-    for context in descriptor_loader.contexts:
-        context_client.RemoveContext(ContextId(**context['context_id']))
-
-
-    # ----- List entities - Ensure database is empty again -------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == 0
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
+    # Load descriptors, validate the base scenario, and unload it
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py
index 6dd4eb827c0fbafdf0bce81c7702af5fd5fe007b..dab9c7eb131434a16dad01be4fb8cd6b6b322515 100644
--- a/src/tests/ecoc22/tests/test_functional_create_service.py
+++ b/src/tests/ecoc22/tests/test_functional_create_service.py
@@ -14,7 +14,7 @@
 
 import logging
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.proto.context_pb2 import ContextId, ServiceTypeEnum
 from common.tools.descriptor.Loader import DescriptorLoader
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
@@ -31,57 +31,23 @@ DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json'
 ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client)
+    descriptor_loader.validate()
 
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, num_services in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == num_services
-
-    for context_uuid, num_slices in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == num_slices
-
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
 
-
-    # ----- Create Service ---------------------------------------------------------------------------------------------
+    # Create Connectivity Service
     service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
     osm_wim.get_connectivity_service_status(service_uuid)
 
-
-    # ----- List entities - Ensure service is created ------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
+    # Ensure slices and services are created
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 1 # OSM slice
 
     response = context_client.ListServices(ADMIN_CONTEXT_ID)
     LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py
index 5cfdc34733d8ddc6927b52131a187fb097b36d9d..710e1a817f00f0b1664439d1c816195202a69a9d 100644
--- a/src/tests/ecoc22/tests/test_functional_delete_service.py
+++ b/src/tests/ecoc22/tests/test_functional_delete_service.py
@@ -14,14 +14,14 @@
 
 import logging
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.proto.context_pb2 import ContextId, ServiceTypeEnum
 from common.tools.descriptor.Loader import DescriptorLoader
-from common.tools.object_factory.Context import json_context_id
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
-from tests.Fixtures import context_client   # pylint: disable=unused-import
+from tests.Fixtures import context_client                                       # pylint: disable=unused-import
 from tests.tools.mock_osm.MockOSM import MockOSM
-from .Fixtures import osm_wim               # pylint: disable=unused-import
+from .Fixtures import osm_wim                                                   # pylint: disable=unused-import
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
@@ -30,44 +30,27 @@ DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json'
 ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure service is created ------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
+    # Ensure slices and services are created
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 1 # OSM slice
 
-    service_uuids = set()
     response = context_client.ListServices(ADMIN_CONTEXT_ID)
     LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
     assert len(response.services) == 3 # 1xL2NM + 2xTAPI
 
+    service_uuids = set()
     for service in response.services:
         service_id = service.service_id
-
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM:
-            service_uuid = service_id.service_uuid.uuid
-            service_uuids.add(service_uuid)
-            osm_wim.conn_info[service_uuid] = {}
-
         response = context_client.ListConnections(service_id)
         LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
             grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
 
         if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM:
             assert len(response.connections) == 2 # 2 connections per service (primary + backup)
+            service_uuid = service_id.service_uuid.uuid
+            service_uuids.add(service_uuid)
+            osm_wim.conn_info[service_uuid] = {}
         elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
             assert len(response.connections) == 1 # 1 connection per service
         else:
@@ -78,34 +61,14 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p
     assert len(service_uuids) == 1  # assume a single L2NM service has been created
     service_uuid = set(service_uuids).pop()
 
-
-    # ----- Delete Service ---------------------------------------------------------------------------------------------
+    # Delete Connectivity Service
     osm_wim.delete_connectivity_service(service_uuid)
 
-
-    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, num_services in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == num_services
-
-    for context_uuid, num_slices in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == num_slices
-
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
+
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client)
+    descriptor_loader.validate()
diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh
index 874774e1ca50830832e842e49b6fff1114cb85d8..6c3d9db662a8232f1fcccf3835b98d69571b6337 100755
--- a/src/tests/ofc22/deploy_specs.sh
+++ b/src/tests/ofc22/deploy_specs.sh
@@ -20,7 +20,6 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator"
 export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
@@ -57,7 +56,7 @@ export CRDB_DATABASE="tfs"
 # See ./deploy/all.sh or ./deploy/crdb.sh for additional details
 export CRDB_DEPLOY_MODE="single"
 
-# Disable flag for dropping database, if exists.
+# Disable flag for dropping database, if it exists.
 export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
@@ -75,20 +74,20 @@ export NATS_REDEPLOY=""
 
 # ----- QuestDB ----------------------------------------------------------------
 
-# If not already set, set the namespace where QuestDB will be deployed.
+# Set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE="qdb"
 
-# If not already set, set the database username to be used by Monitoring.
+# Set the database username to be used to connect to QuestDB.
 export QDB_USERNAME="admin"
 
-# If not already set, set the database user's password to be used by Monitoring.
+# Set the database user's password to be used to connect to QuestDB.
 export QDB_PASSWORD="quest"
 
-# If not already set, set the table name to be used by Monitoring.
-export QDB_TABLE="tfs_monitoring"
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
 
-## If not already set, disable flag for dropping table if exists.
-#export QDB_DROP_TABLE_IF_EXISTS=""
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
 
-# If not already set, disable flag for re-deploying QuestDB from scratch.
+# Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py
index ad2d5703a931c933a9ab4e7162dd1985e5a33d9d..ca1882aaa22ff1ac20d0b1927199a6594a6c441a 100644
--- a/src/tests/ofc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ofc22/tests/test_functional_bootstrap.py
@@ -16,7 +16,7 @@ import logging, time
 from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import ContextId, Empty
 from common.proto.monitoring_pb2 import KpiDescriptorList
-from common.tests.LoadScenario import load_scenario_from_descriptor
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
@@ -33,45 +33,15 @@ def test_scenario_bootstrap(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
     device_client : DeviceClient,   # pylint: disable=redefined-outer-name
 ) -> None:
-    # ----- List entities - Ensure database is empty -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == 0
+    validate_empty_scenario(context_client)
 
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
-
-
-    # ----- Load Scenario ----------------------------------------------------------------------------------------------
-    descriptor_loader = load_scenario_from_descriptor(
-        DESCRIPTOR_FILE, context_client, device_client, None, None)
-
-
-    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, _ in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
-
-    for context_uuid, _ in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == 0
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
 
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py
index d38b653b226639d5c8c831872a64ea1f9140ef8f..122526840796310519f8fe0feb8921e51467b21f 100644
--- a/src/tests/ofc22/tests/test_functional_cleanup.py
+++ b/src/tests/ofc22/tests/test_functional_cleanup.py
@@ -14,8 +14,8 @@
 
 import logging
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
-from common.tools.descriptor.Loader import DescriptorLoader
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
@@ -27,64 +27,18 @@ LOGGER.setLevel(logging.DEBUG)
 DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
-def test_services_removed(
+def test_scenario_cleanup(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
     device_client : DeviceClient,   # pylint: disable=redefined-outer-name
 ) -> None:
-    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, _ in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == 0
-
-    for context_uuid, _ in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == 0
-
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
 
-
-    # ----- Delete Links, Devices, Topologies, Contexts ----------------------------------------------------------------
-    for link in descriptor_loader.links:
-        context_client.RemoveLink(LinkId(**link['link_id']))
-
-    for device in descriptor_loader.devices:
-        device_client .DeleteDevice(DeviceId(**device['device_id']))
-
-    for context_uuid, topology_list in descriptor_loader.topologies.items():
-        for topology in topology_list:
-            context_client.RemoveTopology(TopologyId(**topology['topology_id']))
-
-    for context in descriptor_loader.contexts:
-        context_client.RemoveContext(ContextId(**context['context_id']))
-
-
-    # ----- List entities - Ensure database is empty again -------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == 0
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == 0
+    # Load descriptors, validate the base scenario, and unload it
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py
index 92e0a74f9d291ea49422580fbdfad2c354aeeee2..dd7761f3871db48752f313dc53e8b7d2e2c38489 100644
--- a/src/tests/ofc22/tests/test_functional_create_service.py
+++ b/src/tests/ofc22/tests/test_functional_create_service.py
@@ -21,7 +21,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from monitoring.client.MonitoringClient import MonitoringClient
-from tests.Fixtures import context_client, device_client, monitoring_client     # pylint: disable=unused-import
+from tests.Fixtures import context_client, monitoring_client                    # pylint: disable=unused-import
 from tests.tools.mock_osm.MockOSM import MockOSM
 from .Fixtures import osm_wim                                                   # pylint: disable=unused-import
 from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE
@@ -33,61 +33,27 @@ DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client)
+    descriptor_loader.validate()
 
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, num_services in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == num_services
-
-    for context_uuid, num_slices in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == num_slices
-
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
 
-
-    # ----- Create Service ---------------------------------------------------------------------------------------------
+    # Create Connectivity Service
     service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
     osm_wim.get_connectivity_service_status(service_uuid)
 
-
-    # ----- List entities - Ensure service is created ------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
+    # Ensure slices and services are created
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 1 # OSM slice
 
     response = context_client.ListServices(ADMIN_CONTEXT_ID)
     LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI)
+    assert len(response.services) == 2 # 1xL3NM + 1xTAPI
 
     for service in response.services:
         service_id = service.service_id
@@ -104,7 +70,6 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): #
             raise Exception('Unexpected ServiceType: {:s}'.format(str_service))
 
 
-
 def test_scenario_kpi_values_created(
     monitoring_client: MonitoringClient,    # pylint: disable=redefined-outer-name
 ) -> None:
diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py
index 1811f219acf13b5cc17daf39f1931a6f630f997b..4fffc115e6c0ea881dea637dd741f99715d28c6a 100644
--- a/src/tests/ofc22/tests/test_functional_delete_service.py
+++ b/src/tests/ofc22/tests/test_functional_delete_service.py
@@ -14,10 +14,10 @@
 
 import logging
 from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from common.proto.context_pb2 import ContextId, ServiceTypeEnum
 from common.tools.descriptor.Loader import DescriptorLoader
-from common.tools.object_factory.Context import json_context_id
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from tests.Fixtures import context_client   # pylint: disable=unused-import
 from tests.tools.mock_osm.MockOSM import MockOSM
@@ -30,44 +30,27 @@ DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json'
 ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
-    # ----- List entities - Ensure service is created ------------------------------------------------------------------
-    with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f:
-        descriptors = f.read()
-
-    descriptor_loader = DescriptorLoader(descriptors)
-
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
+    # Ensure slices and services are created
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 1 # OSM slice
 
-    service_uuids = set()
     response = context_client.ListServices(ADMIN_CONTEXT_ID)
     LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI)
+    assert len(response.services) == 2 # 1xL3NM + 1xTAPI
 
+    service_uuids = set()
     for service in response.services:
         service_id = service.service_id
-
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
-            service_uuid = service_id.service_uuid.uuid
-            service_uuids.add(service_uuid)
-            osm_wim.conn_info[service_uuid] = {}
-
         response = context_client.ListConnections(service_id)
         LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
             grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
 
         if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
             assert len(response.connections) == 1 # 1 connection per service
+            service_uuid = service_id.service_uuid.uuid
+            service_uuids.add(service_uuid)
+            osm_wim.conn_info[service_uuid] = {}
         elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
             assert len(response.connections) == 1 # 1 connection per service
         else:
@@ -78,34 +61,14 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p
     assert len(service_uuids) == 1  # assume a single L3NM service has been created
     service_uuid = set(service_uuids).pop()
 
-
-    # ----- Delete Service ---------------------------------------------------------------------------------------------
+    # Delete Connectivity Service
     osm_wim.delete_connectivity_service(service_uuid)
 
-
-    # ----- List entities - Ensure service is removed ------------------------------------------------------------------
-    response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == descriptor_loader.num_contexts
-
-    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
-        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
-        assert len(response.topologies) == num_topologies
-
-    response = context_client.ListDevices(Empty())
-    assert len(response.devices) == descriptor_loader.num_devices
-
-    response = context_client.ListLinks(Empty())
-    assert len(response.links) == descriptor_loader.num_links
-
-    for context_uuid, num_services in descriptor_loader.num_services.items():
-        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.services) == num_services
-
-    for context_uuid, num_slices in descriptor_loader.num_slices.items():
-        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
-        assert len(response.slices) == num_slices
-
-    # This scenario assumes no services are created beforehand
+    # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
+
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client)
+    descriptor_loader.validate()
diff --git a/src/tests/p4/tests/test_functional_bootstrap.py b/src/tests/p4/tests/test_functional_bootstrap.py
index 5e39490f23fe0635eaee502c1b8b8ffb9566f307..97269217336986a6a143a4a7ef94bd8b0710e9b0 100644
--- a/src/tests/p4/tests/test_functional_bootstrap.py
+++ b/src/tests/p4/tests/test_functional_bootstrap.py
@@ -106,8 +106,6 @@ def test_devices_bootstraping(
         link_uuid = link['link_id']['link_uuid']['uuid']
         LOGGER.info('Adding Link {:s}'.format(link_uuid))
         response = context_client.SetLink(Link(**link))
-        assert response.name == link_uuid
-        context_client.SetLink(Link(**link))
 
 def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure devices are created -----------------------------------------------------------------
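
The two lines removed above presumably went together: SetLink returns an identifier message rather than the stored Link (so the name assertion could not hold), and the second SetLink call merely repeated the first; dropping both leaves a single write per link.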
diff --git a/src/tests/p4/tests/test_functional_cleanup.py b/src/tests/p4/tests/test_functional_cleanup.py
index 852f2a655dd5ba6cc80902a09d3b118b34d8da47..aad56a2104797ed7238241c4d3eda8eab3c1a907 100644
--- a/src/tests/p4/tests/test_functional_cleanup.py
+++ b/src/tests/p4/tests/test_functional_cleanup.py
@@ -58,7 +58,6 @@ def test_scenario_cleanup(
         device_uuid = device_id['device_uuid']['uuid']
         LOGGER.info('Deleting Device {:s}'.format(device_uuid))
         device_client.DeleteDevice(DeviceId(**device_id))
-        #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
 
     response = context_client.ListDevices(Empty())
     assert len(response.devices) == 0
@@ -72,7 +71,6 @@ def test_scenario_cleanup(
         LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
         context_client.RemoveTopology(TopologyId(**topology_id))
         context_id = json_context_id(context_uuid)
-        #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
 
     # ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
     for context in CONTEXTS:
@@ -80,4 +78,3 @@ def test_scenario_cleanup(
         context_uuid = context_id['context_uuid']['uuid']
         LOGGER.info('Deleting Context {:s}'.format(context_uuid))
         context_client.RemoveContext(ContextId(**context_id))
-        #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
diff --git a/src/tests/p4/tests/test_functional_create_service.py b/src/tests/p4/tests/test_functional_create_service.py
index beaa23ba3e056fabb528fc7dc5dbebb43b0f019b..76a681eeaff30434663a2391509c3f266e89ecb0 100644
--- a/src/tests/p4/tests/test_functional_create_service.py
+++ b/src/tests/p4/tests/test_functional_create_service.py
@@ -54,15 +54,6 @@ def service_client():
 def test_rules_entry(
     context_client : ContextClient, device_client : DeviceClient, service_client : ServiceClient):  # pylint: disable=redefined-outer-name
 
-
-
-#    for device, _, __ in DEVICES:
-#        # Enable device
-#        device_p4_with_operational_status = copy.deepcopy(device)
-#        device_p4_with_operational_status['device_operational_status'] = \
-#            DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-#        device_client.ConfigureDevice(Device(**device_p4_with_operational_status))
-
     # ----- Create Services ---------------------------------------------------------------
     for service, endpoints in SERVICES:
         # Insert Service (table entries)
@@ -71,4 +62,4 @@ def test_rules_entry(
         service_p4 = copy.deepcopy(service)
         service_client.CreateService(Service(**service_p4))
         service_p4['service_endpoint_ids'].extend(endpoints)
-        service_client.UpdateService(Service(**service_p4))
\ No newline at end of file
+        service_client.UpdateService(Service(**service_p4))
diff --git a/src/tests/tools/load_scenario/__main__.py b/src/tests/tools/load_scenario/__main__.py
index 3559f778d7cf850c3bbb4f2d516f45f18423d28c..df1d5d8bf3d729a459ab6570e81e6ea05f47c981 100644
--- a/src/tests/tools/load_scenario/__main__.py
+++ b/src/tests/tools/load_scenario/__main__.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging, sys
-from common.tests.LoadScenario import load_scenario_from_descriptor
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
@@ -29,7 +29,12 @@ def main():
     slice_client = SliceClient()
 
     LOGGER.info('Loading scenario...')
-    load_scenario_from_descriptor(sys.argv[1], context_client, device_client, service_client, slice_client)
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=sys.argv[1], context_client=context_client, device_client=device_client,
+        service_client=service_client, slice_client=slice_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
     LOGGER.info('Done!')
     return 0
 
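
For reference, this tool takes the descriptors file as its single positional argument, e.g. "python -m tests.tools.load_scenario descriptors.json" (invocation illustrative, assuming src/ is on PYTHONPATH); it now runs the same process/check/validate sequence as the functional bootstrap tests.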
diff --git a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json
index a4109bc7b18d2855f97f5bb329d4354a04b31607..2d4ed3eaf1834f24ba966fbcaac523ca9a3afb9a 100644
--- a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json
+++ b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json
@@ -12,8 +12,8 @@
                 {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "EXT"}}
             ],
             "service_constraints": [
-                {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}},
-                {"custom": {"constraint_type": "latency[ms]", "constraint_value": "15.2"}}
+                {"sla_capacity": {"capacity_gbps": 10.0}},
+                {"sla_latency": {"e2e_latency_ms": 15.2}}
             ],
             "service_config": {"config_rules": [
                 {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {
diff --git a/src/webui/grafana_dashboard_psql.json b/src/webui/grafana_db_mon_kpis_psql.json
similarity index 91%
rename from src/webui/grafana_dashboard_psql.json
rename to src/webui/grafana_db_mon_kpis_psql.json
index ec89c1647cc1086140b0bbd35354546c405ce910..750e5254ea1e4e689d92fc39cedd22a5ee619e03 100644
--- a/src/webui/grafana_dashboard_psql.json
+++ b/src/webui/grafana_db_mon_kpis_psql.json
@@ -33,7 +33,7 @@
       {
         "datasource": {
           "type": "postgres",
-          "uid": "questdb"
+          "uid": "questdb-mon-kpi"
         },
         "fieldConfig": {
           "defaults": {
@@ -162,14 +162,14 @@
           {
             "datasource": {
               "type": "postgres",
-              "uid": "questdb"
+              "uid": "questdb-mon-kpi"
             },
             "format": "time_series",
             "group": [],
             "hide": false,
             "metricColumn": "kpi_value",
             "rawQuery": true,
-            "rawSql": "SELECT\r\n  $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n  tfs_monitoring\r\nWHERE\r\n  $__timeFilter(timestamp) AND device_name IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n  device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n  timestamp",
+            "rawSql": "SELECT\r\n  $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n  tfs_monitoring_kpis\r\nWHERE\r\n  $__timeFilter(timestamp) AND device_name IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n  device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n  timestamp",
             "refId": "A",
             "select": [
               [
@@ -181,7 +181,7 @@
                 }
               ]
             ],
-            "table": "monitoring",
+            "table": "tfs_monitoring_kpis",
             "timeColumn": "timestamp",
             "where": [
               {
@@ -227,16 +227,16 @@
           },
           "datasource": {
             "type": "postgres",
-            "uid": "questdb"
+            "uid": "questdb-mon-kpi"
           },
-          "definition": "SELECT DISTINCT device_name FROM tfs_monitoring;",
+          "definition": "SELECT DISTINCT device_name FROM tfs_monitoring_kpis;",
           "hide": 0,
           "includeAll": true,
           "label": "Device",
           "multi": true,
           "name": "device_name",
           "options": [],
-          "query": "SELECT DISTINCT device_name FROM tfs_monitoring;",
+          "query": "SELECT DISTINCT device_name FROM tfs_monitoring_kpis;",
           "refresh": 2,
           "regex": "",
           "skipUrlSync": false,
@@ -255,16 +255,16 @@
           },
           "datasource": {
             "type": "postgres",
-            "uid": "questdb"
+            "uid": "questdb-mon-kpi"
           },
-          "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})",
+          "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring_kpis WHERE device_name IN (${device_name})",
           "hide": 0,
           "includeAll": true,
           "label": "EndPoint",
           "multi": true,
           "name": "endpoint_name",
           "options": [],
-          "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})",
+          "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring_kpis WHERE device_name IN (${device_name})",
           "refresh": 2,
           "regex": "",
           "skipUrlSync": false,
@@ -283,16 +283,16 @@
           },
           "datasource": {
             "type": "postgres",
-            "uid": "questdb"
+            "uid": "questdb-mon-kpi"
           },
-          "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;",
+          "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring_kpis;",
           "hide": 0,
           "includeAll": true,
           "label": "Kpi Sample Type",
           "multi": true,
           "name": "kpi_sample_type",
           "options": [],
-          "query": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;",
+          "query": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring_kpis;",
           "refresh": 2,
           "regex": "",
           "skipUrlSync": false,
@@ -308,7 +308,7 @@
     "timepicker": {},
     "timezone": "utc",
     "title": "L3 Monitoring",
-    "uid": "tf-l3-monit",
+    "uid": "tfs-l3-monit",
     "version": 6,
     "weekStart": ""
   }
diff --git a/src/webui/grafana_db_slc_grps_psql.json b/src/webui/grafana_db_slc_grps_psql.json
new file mode 100644
index 0000000000000000000000000000000000000000..6aa7a478b6a19a83fa1677579163859eca6dd348
--- /dev/null
+++ b/src/webui/grafana_db_slc_grps_psql.json
@@ -0,0 +1,176 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "id": null,
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": {
+            "type": "grafana",
+            "uid": "-- Grafana --"
+          },
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "target": {
+            "limit": 100,
+            "matchAny": false,
+            "tags": [],
+            "type": "dashboard"
+          },
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "fiscalYearStartMonth": 0,
+    "graphTooltip": 0,
+    "links": [],
+    "liveNow": false,
+    "panels": [
+      {
+        "datasource": {
+          "type": "postgres",
+          "uid": "questdb-slc-grp"
+        },
+        "gridPos": {
+          "h": 21,
+          "w": 11,
+          "x": 0,
+          "y": 0
+        },
+        "id": 2,
+        "options": {
+          "ReferenceLines": [],
+          "border": {
+            "color": "yellow",
+            "size": 0
+          },
+          "fieldSets": [
+            {
+              "col": 6,
+              "color": "#C4162A",
+              "colorCol": 3,
+              "dotSize": 2,
+              "hidden": false,
+              "lineSize": 1,
+              "lineType": "none",
+              "polynomialOrder": 3,
+              "sizeCol": -7
+            },
+            {
+              "col": 5,
+              "color": "#edcd7d",
+              "colorCol": 3,
+              "dotSize": 2,
+              "hidden": false,
+              "lineSize": 1,
+              "lineType": "none",
+              "polynomialOrder": 3,
+              "sizeCol": -2
+            }
+          ],
+          "grid": {
+            "color": "gray"
+          },
+          "label": {
+            "col": -1,
+            "color": "#CCC",
+            "textSize": 2
+          },
+          "legend": {
+            "show": false,
+            "size": 0
+          },
+          "xAxis": {
+            "col": 4,
+            "inverted": false
+          },
+          "xAxisExtents": {
+            "min": 0,
+            "max": 100
+          },
+          "xAxisTitle": {
+            "text": "Availability %",
+            "color": "white",
+            "textSize": 2,
+            "rotated": false,
+            "logScale": false,
+            "fontSize": 4,
+            "fontColor": "white"
+          },
+          "xMargins": {
+            "lower": 30,
+            "upper": 10
+          },
+          "yAxisExtents": {
+            "min": 0,
+            "max": 100
+          },
+          "yAxisTitle": {
+            "text": "Capacity Gb/s",
+            "color": "#ccccdc",
+            "textSize": 2,
+            "rotated": true,
+            "logScale": false,
+            "fontSize": 4,
+            "fontColor": "white"
+          },
+          "yMargins": {
+            "lower": 20,
+            "upper": 20
+          }
+        },
+        "targets": [
+          {
+            "datasource": {
+              "type": "postgres",
+              "uid": "questdb-slc-grp"
+            },
+            "format": "table",
+            "group": [],
+            "hide": false,
+            "metricColumn": "none",
+            "rawQuery": true,
+            "rawSql": "SELECT timestamp as \"time\", slice_uuid, slice_group, slice_color, slice_availability, slice_capacity, slice_capacity_center, is_deleted\nFROM tfs_slice_groups\nWHERE $__timeFilter(timestamp) AND is_deleted <> 'true';",
+            "refId": "A",
+            "select": [
+              [
+                {
+                  "params": [
+                    "value"
+                  ],
+                  "type": "column"
+                }
+              ]
+            ],
+            "table": "tfs_slice_groups",
+            "timeColumn": "timestamp",
+            "where": []
+          }
+        ],
+        "title": "Slice Groups",
+        "transformations": [],
+        "type": "michaeldmoore-scatter-panel"
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 36,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": []
+    },
+    "time": {
+      "from": "now-30m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "Slice Grouping",
+    "uid": "tfs-slice-grps",
+    "version": 2,
+    "weekStart": ""
+  }
+}
diff --git a/src/webui/grafana_backup_dashboard.json b/src/webui/old/grafana_backup_dashboard.json
similarity index 100%
rename from src/webui/grafana_backup_dashboard.json
rename to src/webui/old/grafana_backup_dashboard.json
diff --git a/src/webui/grafana_dashboard.json b/src/webui/old/grafana_dashboard.json
similarity index 100%
rename from src/webui/grafana_dashboard.json
rename to src/webui/old/grafana_dashboard.json
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index d5b40b486dd7772cea29fd7d71db949b2954155c..fca1071419b3b2b61739c2a0d1d8bfa45aba5119 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -96,7 +96,9 @@ def create_app(use_config=None, web_app_root=None):
     app.register_blueprint(link)
 
     app.jinja_env.globals.update({              # pylint: disable=no-member
+        'enumerate'           : enumerate,
         'json_to_list'        : json_to_list,
+        'round'               : round,
         'get_working_context' : get_working_context,
         'get_working_topology': get_working_topology,
     })
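
Registering these builtins makes them callable from Jinja templates; presumably so templates can render, for instance, "{{ round(kpi_value, 2) }}" or loop with "{% for idx, dev in enumerate(devices) %}" without precomputing those values in the view functions.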
diff --git a/src/webui/service/device/forms.py b/src/webui/service/device/forms.py
index e496c4d432c7c9d02227141ea6d618984378c185..c6bacac9bc1723a020f3057fad9c9e8306c9dbca 100644
--- a/src/webui/service/device/forms.py
+++ b/src/webui/service/device/forms.py
@@ -12,21 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# external imports
 from flask_wtf import FlaskForm
-from wtforms import StringField, SelectField, TextAreaField, SubmitField, BooleanField, Form
-from wtforms.validators import DataRequired, Length, NumberRange, Regexp, ValidationError
+from wtforms import StringField, SelectField, TextAreaField, SubmitField, BooleanField
+from wtforms.validators import DataRequired, Length, NumberRange, ValidationError
 from common.proto.context_pb2 import DeviceOperationalStatusEnum
-from webui.utils.form_validators import key_value_validator
 
 class AddDeviceForm(FlaskForm):
     device_id = StringField('ID', 
                            validators=[DataRequired(), Length(min=5)])
-    device_type = SelectField('Type', choices = [])                                                     
-    operational_status = SelectField('Operational Status',
-                        #    choices=[(-1, 'Select...'), (0, 'Undefined'), (1, 'Disabled'), (2, 'Enabled')],
-                           coerce=int,
-                           validators=[NumberRange(min=0)])
+    device_type = SelectField('Type')
+    operational_status = SelectField('Operational Status', coerce=int, validators=[NumberRange(min=0)])
     device_drivers_undefined = BooleanField('UNDEFINED / EMULATED')
     device_drivers_openconfig = BooleanField('OPENCONFIG')
     device_drivers_transport_api = BooleanField('TRANSPORT_API')
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index ce3edcfda45859c3e5db83c62fd328ee546762a5..ebf77a35ffdf9c2546ddbdd1bac0c8c1f54a2b56 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -14,16 +14,14 @@
 
 import json
 from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for
+from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import (
-    ConfigActionEnum, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, Empty, TopologyId)
-from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Topology import json_topology_id
+    ConfigActionEnum, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, Empty)
+from common.tools.context_queries.Device import get_device
+from common.tools.context_queries.Topology import get_topology
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
-from webui.service.device.forms import AddDeviceForm
-from common.DeviceTypes import DeviceTypeEnum
-from webui.service.device.forms import ConfigForm
-from webui.service.device.forms import UpdateDeviceForm
+from webui.service.device.forms import AddDeviceForm, ConfigForm, UpdateDeviceForm
 
 device = Blueprint('device', __name__, url_prefix='/device')
 context_client = ContextClient()
@@ -39,17 +37,19 @@ def home():
     topology_uuid = session['topology_uuid']
 
     context_client.connect()
-    json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
-    grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
-    topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
-    grpc_devices: DeviceList = context_client.ListDevices(Empty())
+    grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False)
+    if grpc_topology is None:
+        flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger')
+        devices = []
+    else:
+        topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
+        grpc_devices: DeviceList = context_client.ListDevices(Empty())
+        devices = [
+            device for device in grpc_devices.devices
+            if device.device_id.device_uuid.uuid in topo_device_uuids
+        ]
     context_client.close()
 
-    devices = [
-        device for device in grpc_devices.devices
-        if device.device_id.device_uuid.uuid in topo_device_uuids
-    ]
-
     return render_template(
         'device/home.html', devices=devices, dde=DeviceDriverEnum,
         dose=DeviceOperationalStatusEnum)
@@ -71,23 +71,23 @@ def add():
     if form.validate_on_submit():
         device_obj = Device()
         # Device UUID: 
-        device_obj.device_id.device_uuid.uuid = form.device_id.data
+        device_obj.device_id.device_uuid.uuid = form.device_id.data # pylint: disable=no-member
 
         # Device type: 
         device_obj.device_type = str(form.device_type.data)
 
         # Device configurations: 
-        config_rule = device_obj.device_config.config_rules.add()
+        config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member
         config_rule.action = ConfigActionEnum.CONFIGACTION_SET
         config_rule.custom.resource_key = '_connect/address'
         config_rule.custom.resource_value = form.device_config_address.data
 
-        config_rule = device_obj.device_config.config_rules.add()
+        config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member
         config_rule.action = ConfigActionEnum.CONFIGACTION_SET
         config_rule.custom.resource_key = '_connect/port'
         config_rule.custom.resource_value = form.device_config_port.data
 
-        config_rule = device_obj.device_config.config_rules.add()
+        config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member
         config_rule.action = ConfigActionEnum.CONFIGACTION_SET
         config_rule.custom.resource_key = '_connect/settings'
 
@@ -105,20 +105,22 @@ def add():
         device_obj.device_operational_status = form.operational_status.data
 
         # Device drivers: 
+        device_drivers = list()
         if form.device_drivers_undefined.data:
-            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED)
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED)
         if form.device_drivers_openconfig.data:
-            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG)
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG)
         if form.device_drivers_transport_api.data:
-            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API)
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API)
         if form.device_drivers_p4.data:
-            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4)
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4)
         if form.device_drivers_ietf_network_topology.data:
-            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY)
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY)
         if form.device_drivers_onf_tr_352.data:
-            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352)
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352)
         if form.device_drivers_xr.data:
-            device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR)
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR)
+        device_obj.device_drivers.extend(device_drivers) # pylint: disable=no-member
 
         try:
             device_client.connect()
@@ -126,7 +128,7 @@ def add():
             device_client.close()
             flash(f'New device was created with ID "{response.device_uuid.uuid}".', 'success')
             return redirect(url_for('device.home'))
-        except Exception as e:
+        except Exception as e: # pylint: disable=broad-except
             flash(f'Problem adding the device. {e.details()}', 'danger')
         
     return render_template('device/add.html', form=form,
@@ -134,14 +136,15 @@ def add():
 
 @device.route('detail/<path:device_uuid>', methods=['GET', 'POST'])
 def detail(device_uuid: str):
-    request = DeviceId()
-    request.device_uuid.uuid = device_uuid
     context_client.connect()
-    response = context_client.GetDevice(request)
+    device_obj = get_device(context_client, device_uuid, rw_copy=False)
+    if device_obj is None:
+        flash('Device({:s}) not found'.format(str(device_uuid)), 'danger')
+        device_obj = Device()
     context_client.close()
-    return render_template('device/detail.html', device=response,
-                                                 dde=DeviceDriverEnum,
-                                                 dose=DeviceOperationalStatusEnum)
+
+    return render_template(
+        'device/detail.html', device=device_obj, dde=DeviceDriverEnum, dose=DeviceOperationalStatusEnum)
 
 @device.get('<path:device_uuid>/delete')
 def delete(device_uuid):
@@ -154,13 +157,13 @@ def delete(device_uuid):
         # TODO: finalize implementation
 
         request = DeviceId()
-        request.device_uuid.uuid = device_uuid
+        request.device_uuid.uuid = device_uuid # pylint: disable=no-member
         device_client.connect()
-        response = device_client.DeleteDevice(request)
+        device_client.DeleteDevice(request)
         device_client.close()
 
         flash(f'Device "{device_uuid}" deleted successfully!', 'success')
-    except Exception as e:
+    except Exception as e: # pylint: disable=broad-except
         flash(f'Problem deleting device "{device_uuid}": {e.details()}', 'danger')
         current_app.logger.exception(e)
     return redirect(url_for('device.home'))
@@ -169,25 +172,25 @@ def delete(device_uuid):
 def addconfig(device_uuid):
     form = ConfigForm()
     request = DeviceId()
-    request.device_uuid.uuid = device_uuid
+    request.device_uuid.uuid = device_uuid # pylint: disable=no-member
     context_client.connect()
     response = context_client.GetDevice(request)
     context_client.close()
 
     if form.validate_on_submit():
-        device = Device()
-        device.CopyFrom(response)
-        config_rule = device.device_config.config_rules.add()
+        device_obj = Device()
+        device_obj.CopyFrom(response)
+        config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member
         config_rule.action = ConfigActionEnum.CONFIGACTION_SET
         config_rule.custom.resource_key = form.device_key_config.data
         config_rule.custom.resource_value = form.device_value_config.data
         try:
             device_client.connect()
-            response: DeviceId = device_client.ConfigureDevice(device)
+            response: DeviceId = device_client.ConfigureDevice(device_obj)
             device_client.close()
             flash(f'New configuration was created with ID "{response.device_uuid.uuid}".', 'success')
             return redirect(url_for('device.home'))
-        except Exception as e:
+        except Exception as e: # pylint: disable=broad-except
+            flash(f'Problem adding the configuration. {e.details()}', 'danger')
 
     return render_template('device/addconfig.html', form=form,  submit_text='Add New Configuration')
@@ -203,28 +206,29 @@ def updateconfig():
 def update(device_uuid):
     form = UpdateDeviceForm()
     request = DeviceId()
-    request.device_uuid.uuid = device_uuid
+    request.device_uuid.uuid = device_uuid # pylint: disable=no-member
     context_client.connect()
     response = context_client.GetDevice(request)
     context_client.close()
 
     # listing enum values
     form.update_operational_status.choices = []
-    for key, value in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items():
-        form.update_operational_status.choices.append((DeviceOperationalStatusEnum.Value(key), key.replace('DEVICEOPERATIONALSTATUS_', '')))
+    for key, _ in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items():
+        item = (DeviceOperationalStatusEnum.Value(key), key.replace('DEVICEOPERATIONALSTATUS_', ''))
+        form.update_operational_status.choices.append(item)
 
     form.update_operational_status.default = response.device_operational_status
 
     if form.validate_on_submit():
-        device = Device()
-        device.CopyFrom(response)
-        device.device_operational_status = form.update_operational_status.data
+        device_obj = Device()
+        device_obj.CopyFrom(response)
+        device_obj.device_operational_status = form.update_operational_status.data
         try:
             device_client.connect()
-            response: DeviceId = device_client.ConfigureDevice(device)
+            response: DeviceId = device_client.ConfigureDevice(device_obj)
             device_client.close()
             flash(f'Status of device with ID "{response.device_uuid.uuid}" was updated.', 'success')
             return redirect(url_for('device.home'))
-        except Exception as e:
+        except Exception as e: # pylint: disable=broad-except
+            flash(f'Problem updating the device. {e.details()}', 'danger')
     return render_template('device/update.html', device=response, form=form, submit_text='Update Device')
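
The driver handling in add() now collects the selected drivers in a plain list and `extend`s the protobuf field at the end. This matches how protobuf repeated fields behave: they cannot be assigned wholesale, only mutated in place. Reduced to the essential pattern:

```python
# Sketch of the repeated-field pattern used in add():
from common.proto.context_pb2 import Device, DeviceDriverEnum

device_obj = Device()
drivers = [DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG]
#device_obj.device_drivers = drivers         # raises AttributeError: assignment not allowed
device_obj.device_drivers.extend(drivers)    # correct: mutate the repeated field in place
```
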
diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py
index 9324ad0be6d9e72dfd3413863f0590f6ec595c3b..0fda8958e2ab2609969d2c1f68aaae61b7360b68 100644
--- a/src/webui/service/link/routes.py
+++ b/src/webui/service/link/routes.py
@@ -14,10 +14,10 @@
 
 
 from flask import render_template, Blueprint, flash, session, redirect, url_for
-from common.proto.context_pb2 import Empty, LinkId, LinkList, TopologyId
+from common.proto.context_pb2 import Empty, Link, LinkList
 from common.tools.context_queries.EndPoint import get_endpoint_names
-from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Topology import json_topology_id
+from common.tools.context_queries.Link import get_link
+from common.tools.context_queries.Topology import get_topology
 from context.client.ContextClient import ContextClient
 
 
@@ -33,20 +33,21 @@ def home():
     context_uuid = session['context_uuid']
     topology_uuid = session['topology_uuid']
 
+    links, endpoint_ids = list(), list()
+    device_names, endpoints_data = dict(), dict()
+
     context_client.connect()
-    json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))
-    grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id))
-    topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids}
-    grpc_links: LinkList = context_client.ListLinks(Empty())
-
-    endpoint_ids = []
-    links = []
-    for link_ in grpc_links.links:
-        if link_.link_id.link_uuid.uuid not in topo_link_uuids: continue
-        links.append(link_)
-        endpoint_ids.extend(link_.link_endpoint_ids)
-
-    device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+    grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False)
+    if grpc_topology is None:
+        flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger')
+    else:
+        topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids}
+        grpc_links: LinkList = context_client.ListLinks(Empty())
+        for link_ in grpc_links.links:
+            if link_.link_id.link_uuid.uuid not in topo_link_uuids: continue
+            links.append(link_)
+            endpoint_ids.extend(link_.link_endpoint_ids)
+        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
     context_client.close()
 
     return render_template('link/home.html', links=links, device_names=device_names, endpoints_data=endpoints_data)
@@ -54,10 +55,13 @@ def home():
 
 @link.route('detail/<path:link_uuid>', methods=('GET', 'POST'))
 def detail(link_uuid: str):
-    request = LinkId()
-    request.link_uuid.uuid = link_uuid  # pylint: disable=no-member
     context_client.connect()
-    response = context_client.GetLink(request)
-    device_names, endpoints_data = get_endpoint_names(context_client, response.link_endpoint_ids)
+    link_obj = get_link(context_client, link_uuid, rw_copy=False)
+    if link_obj is None:
+        flash('Link({:s}) not found'.format(str(link_uuid)), 'danger')
+        link_obj = Link()
+        device_names, endpoints_data = dict(), dict()
+    else:
+        device_names, endpoints_data = get_endpoint_names(context_client, link_obj.link_endpoint_ids)
     context_client.close()
-    return render_template('link/detail.html',link=response, device_names=device_names, endpoints_data=endpoints_data)
+    return render_template('link/detail.html', link=link_obj, device_names=device_names, endpoints_data=endpoints_data)
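
These routes assume a common contract for the `common.tools.context_queries` helpers (`get_device`, `get_link`, `get_topology`, ...): each returns the requested gRPC object, or `None` when it does not exist, so callers can flash a message instead of handling a NOT_FOUND RpcError. The call-site shape, stripped to its essentials:

```python
# Assumed helper contract, inferred from the call sites above:
from common.tools.context_queries.Link import get_link
from context.client.ContextClient import ContextClient

context_client = ContextClient()
context_client.connect()
link_obj = get_link(context_client, 'example-link-uuid', rw_copy=False)
context_client.close()

if link_obj is None:
    print('Link not found')  # the routes flash() a 'danger' message here
else:
    print(link_obj.link_id.link_uuid.uuid)
```
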
diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e0020b04f33152de382f5b93af9735f8d737f92
--- /dev/null
+++ b/src/webui/service/load_gen/forms.py
@@ -0,0 +1,42 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from flask_wtf import FlaskForm
+from wtforms import BooleanField, FloatField, IntegerField, StringField, SubmitField
+from wtforms.validators import DataRequired, NumberRange
+
+class LoadGenForm(FlaskForm):
+    num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)])
+    num_generated = IntegerField('Num Generated', default=0, render_kw={'readonly': True})
+
+    request_type_service_l2nm = BooleanField('Service L2NM', default=False)
+    request_type_service_l3nm = BooleanField('Service L3NM', default=False)
+    request_type_service_mw = BooleanField('Service MW', default=False)
+    request_type_service_tapi = BooleanField('Service TAPI', default=False)
+    request_type_slice_l2nm = BooleanField('Slice L2NM', default=True)
+    request_type_slice_l3nm = BooleanField('Slice L3NM', default=False)
+
+    offered_load = FloatField('Offered Load [Erlang]', default=50, validators=[NumberRange(min=0.0)])
+    holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=0.0)])
+    inter_arrival_time = FloatField('Inter Arrival Time [seconds]', default=0, validators=[NumberRange(min=0.0)])
+
+    do_teardown = BooleanField('Do Teardown', default=True)
+
+    record_to_dlt = BooleanField('Record to DLT', default=False)
+    dlt_domain_id = StringField('DLT Domain Id', default='')
+
+    infinite_loop = BooleanField('Infinite Loop', default=False, render_kw={'disabled': True})
+    running = BooleanField('Running', default=False, render_kw={'disabled': True})
+
+    submit = SubmitField('Start/Stop')
diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py
index 3118b6de0e061adac65be178163623cd2d1d8fff..5f47f06b0ff59ad1383aab94caa41adc08440c87 100644
--- a/src/webui/service/load_gen/routes.py
+++ b/src/webui/service/load_gen/routes.py
@@ -12,34 +12,115 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from flask import render_template, Blueprint, flash
+from typing import Any, Optional
+from flask import redirect, render_template, Blueprint, flash, url_for
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
 from load_generator.client.LoadGeneratorClient import LoadGeneratorClient
+from .forms import LoadGenForm
 
 load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen')
 
-@load_gen.route('start', methods=['GET'])
-def start():
+def set_properties(field, data : Any, readonly : Optional[bool] = None, disabled : Optional[bool] = None) -> None:
+    # WTForms fields may have render_kw unset or None; normalize it to a dict
+    # before setting the readonly/disabled attributes below.
+    if getattr(field, 'render_kw', None) is None:
+        field.render_kw = dict()
+
+    if readonly is not None:
+        field.render_kw['readonly'] = readonly
+    if disabled is not None:
+        field.render_kw['disabled'] = disabled
+
+    if (readonly is not None and readonly) or (disabled is not None and disabled):
+        field.data = data
+
+@load_gen.route('home', methods=['GET'])
+def home():
     load_gen_client = LoadGeneratorClient()
-    try:
-        load_gen_client.connect()
-        load_gen_client.Start(Empty())
-        load_gen_client.close()
-        flash('Load Generator Started.', 'success')
-    except Exception as e: # pylint: disable=broad-except
-        flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger')
 
-    return render_template('main/debug.html')
+    load_gen_client.connect()
+    status = load_gen_client.GetStatus(Empty())
+    load_gen_client.close()
+
+    request_types = status.parameters.request_types
+    _request_type_service_l2nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM in request_types
+    _request_type_service_l3nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM in request_types
+    _request_type_service_mw   = RequestTypeEnum.REQUESTTYPE_SERVICE_MW   in request_types
+    _request_type_service_tapi = RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI in request_types
+    _request_type_slice_l2nm   = RequestTypeEnum.REQUESTTYPE_SLICE_L2NM   in request_types
+    _request_type_slice_l3nm   = RequestTypeEnum.REQUESTTYPE_SLICE_L3NM   in request_types
+
+    _offered_load       = round(status.parameters.offered_load       , ndigits=4)
+    _holding_time       = round(status.parameters.holding_time       , ndigits=4)
+    _inter_arrival_time = round(status.parameters.inter_arrival_time , ndigits=4)
+
+    form = LoadGenForm()
+    set_properties(form.num_requests             , status.parameters.num_requests , readonly=status.running)
+    set_properties(form.offered_load             , _offered_load                  , readonly=status.running)
+    set_properties(form.holding_time             , _holding_time                  , readonly=status.running)
+    set_properties(form.inter_arrival_time       , _inter_arrival_time            , readonly=status.running)
+    set_properties(form.do_teardown              , status.parameters.do_teardown  , disabled=status.running)
+    set_properties(form.record_to_dlt            , status.parameters.record_to_dlt, disabled=status.running)
+    set_properties(form.dlt_domain_id            , status.parameters.dlt_domain_id, readonly=status.running)
+    set_properties(form.request_type_service_l2nm, _request_type_service_l2nm     , disabled=status.running)
+    set_properties(form.request_type_service_l3nm, _request_type_service_l3nm     , disabled=status.running)
+    set_properties(form.request_type_service_mw  , _request_type_service_mw       , disabled=status.running)
+    set_properties(form.request_type_service_tapi, _request_type_service_tapi     , disabled=status.running)
+    set_properties(form.request_type_slice_l2nm  , _request_type_slice_l2nm       , disabled=status.running)
+    set_properties(form.request_type_slice_l3nm  , _request_type_slice_l3nm       , disabled=status.running)
+    set_properties(form.num_generated            , status.num_generated           , disabled=True)
+    set_properties(form.infinite_loop            , status.infinite_loop           , disabled=True)
+    set_properties(form.running                  , status.running                 , disabled=True)
 
-@load_gen.route('stop', methods=['GET'])
+    form.submit.label.text = 'Stop' if status.running else 'Start'
+    form_action = url_for('load_gen.stop') if status.running else url_for('load_gen.start')
+    return render_template('load_gen/home.html', form=form, form_action=form_action)
+
+@load_gen.route('start', methods=['POST'])
+def start():
+    form = LoadGenForm()
+    if form.validate_on_submit():
+        try:
+            load_gen_params = Parameters()
+            load_gen_params.num_requests       = form.num_requests.data
+            load_gen_params.offered_load       = form.offered_load.data
+            load_gen_params.holding_time       = form.holding_time.data
+            load_gen_params.inter_arrival_time = form.inter_arrival_time.data
+            load_gen_params.do_teardown        = form.do_teardown.data
+            load_gen_params.dry_mode           = False
+            load_gen_params.record_to_dlt      = form.record_to_dlt.data
+            load_gen_params.dlt_domain_id      = form.dlt_domain_id.data
+
+            del load_gen_params.request_types[:] # pylint: disable=no-member
+            request_types = list()
+            if form.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM)
+            if form.request_type_service_l3nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM)
+            if form.request_type_service_mw  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_MW  )
+            if form.request_type_service_tapi.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI)
+            if form.request_type_slice_l2nm  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L2NM  )
+            if form.request_type_slice_l3nm  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L3NM  )
+            load_gen_params.request_types.extend(request_types) # pylint: disable=no-member
+
+            load_gen_client = LoadGeneratorClient()
+            load_gen_client.connect()
+            load_gen_client.Start(load_gen_params)
+            load_gen_client.close()
+            flash('Load Generator Started.', 'success')
+        except Exception as e: # pylint: disable=broad-except
+            flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger')
+    return redirect(url_for('load_gen.home'))
+
+@load_gen.route('stop', methods=['POST'])
 def stop():
-    load_gen_client = LoadGeneratorClient()
-    try:
-        load_gen_client.connect()
-        load_gen_client.Stop(Empty())
-        load_gen_client.close()
-        flash('Load Generator Stoped.', 'success')
-    except Exception as e: # pylint: disable=broad-except
-        flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger')
-
-    return render_template('main/debug.html')
+    form = LoadGenForm()
+    if form.validate_on_submit():
+        try:
+            load_gen_client = LoadGeneratorClient()
+            load_gen_client.connect()
+            load_gen_client.Stop(Empty())
+            load_gen_client.close()
+            flash('Load Generator Stopped.', 'success')
+        except Exception as e: # pylint: disable=broad-except
+            flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger')
+    return redirect(url_for('load_gen.home'))
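
set_properties() relies on WTForms emitting every `render_kw` entry as an HTML attribute when a field is rendered, with boolean `True` values rendered as bare attributes (`readonly`, `disabled`). A reduced sketch of the effect (the rendered markup may vary slightly across WTForms versions):

```python
# Sketch: render_kw entries become HTML attributes on the rendered widget.
from wtforms import Form, IntegerField

class StatusForm(Form):
    num_requests = IntegerField('Num Requests')

form = StatusForm()
form.num_requests.render_kw = {'readonly': True}
form.num_requests.data = 100
print(form.num_requests())
# e.g.: <input id="num_requests" name="num_requests" readonly type="number" value="100">
```
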
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index dcbbf71a6fee6ebd040f14c7d0d2cb07ba9ee085..32cefddf3b2a8251623b60fd9fc039588cd6b9bb 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import base64, json, logging, re
+import base64, json, logging #, re
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
 from common.proto.context_pb2 import ContextList, Empty, TopologyId, TopologyList
 from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
@@ -55,7 +55,7 @@ def process_descriptors(descriptors):
 def home():
     context_client.connect()
     device_client.connect()
-    context_topology_form: ContextTopologyForm = ContextTopologyForm()
+    context_topology_form = ContextTopologyForm()
     context_topology_form.context_topology.choices.append(('', 'Select...'))
 
     contexts : ContextList = context_client.ListContexts(Empty())
@@ -87,6 +87,10 @@ def home():
             #session['topology_name'] = topology_name
             MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.'
             flash(MSG, 'success')
+
+            context_client.close()
+            device_client.close()
+
             return redirect(url_for('main.home'))
 
             #match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid)
@@ -101,7 +105,7 @@ def home():
     if 'context_topology_uuid' in session:
         context_topology_form.context_topology.data = session['context_topology_uuid']
 
-    descriptor_form: DescriptorForm = DescriptorForm()
+    descriptor_form = DescriptorForm()
     try:
         if descriptor_form.validate_on_submit():
             process_descriptors(descriptor_form.descriptors)
diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py
index ee9b092ae6828d7e2a82c66b1461c2f90853a803..defbe2cb003cc97830d6ec24db01bf8734a7f530 100644
--- a/src/webui/service/service/routes.py
+++ b/src/webui/service/service/routes.py
@@ -14,8 +14,11 @@
 
 import grpc
 from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for
-from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection
+from common.proto.context_pb2 import (
+    IsolationLevelEnum, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection)
+from common.tools.context_queries.Context import get_context
 from common.tools.context_queries.EndPoint import get_endpoint_names
+from common.tools.context_queries.Service import get_service
 from context.client.ContextClient import ContextClient
 from service.client.ServiceClient import ServiceClient
 
@@ -26,93 +29,94 @@ service_client = ServiceClient()
 
 @service.get('/')
 def home():
-    # flash('This is an info message', 'info')
-    # flash('This is a danger message', 'danger')
-
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-    request = ContextId()
-    request.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     context_client.connect()
-    try:
-        service_list = context_client.ListServices(request)
-        # print(service_list)
-        services = service_list.services
-        context_found = True
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise
-        if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
-        services = []
-        context_found = False
-
-    if context_found:
-        endpoint_ids = []
-        for service_ in services:
-            endpoint_ids.extend(service_.service_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+
+    context_obj = get_context(context_client, context_uuid, rw_copy=False)
+    if context_obj is None:
+        flash('Context({:s}) not found'.format(str(context_uuid)), 'danger')
+        services, device_names, endpoints_data = list(), dict(), dict()
     else:
-        device_names, endpoints_data = [],[]
+        try:
+            services = context_client.ListServices(context_obj.context_id)
+            services = services.services
+        except grpc.RpcError as e:
+            if e.code() != grpc.StatusCode.NOT_FOUND: raise
+            if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
+            services, device_names, endpoints_data = list(), dict(), dict()
+        else:
+            endpoint_ids = list()
+            for service_ in services:
+                endpoint_ids.extend(service_.service_endpoint_ids)
+            device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
 
     context_client.close()
     return render_template(
         'service/home.html', services=services, device_names=device_names, endpoints_data=endpoints_data,
-        context_not_found=not context_found, ste=ServiceTypeEnum, sse=ServiceStatusEnum)
+        ste=ServiceTypeEnum, sse=ServiceStatusEnum)
 
 
 @service.route('add', methods=['GET', 'POST'])
 def add():
     flash('Add service route called', 'danger')
     raise NotImplementedError()
-    return render_template('service/home.html')
+    #return render_template('service/home.html')
 
 
 @service.get('<path:service_uuid>/detail')
 def detail(service_uuid: str):
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-    
-    request: ServiceId = ServiceId()
-    request.service_uuid.uuid = service_uuid
-    request.context_id.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     try:
         context_client.connect()
-        response: Service = context_client.GetService(request)
-        connections: Connection = context_client.ListConnections(request)
-        connections = connections.connections
 
-        endpoint_ids = []
-        endpoint_ids.extend(response.service_endpoint_ids)
-        for connection in connections:
-            endpoint_ids.extend(connection.path_hops_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+        endpoint_ids, connections = list(), list()
+        service_obj = get_service(context_client, service_uuid, rw_copy=False)
+        if service_obj is None:
+            flash('Context({:s})/Service({:s}) not found'.format(str(context_uuid), str(service_uuid)), 'danger')
+            service_obj = Service()
+        else:
+            endpoint_ids.extend(service_obj.service_endpoint_ids)
+            connections = context_client.ListConnections(service_obj.service_id)
+            connections = connections.connections
+            for connection in connections: endpoint_ids.extend(connection.path_hops_endpoint_ids)
+
+        if len(endpoint_ids) > 0:
+            device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+        else:
+            device_names, endpoints_data = dict(), dict()
 
         context_client.close()
+
+        return render_template(
+            'service/detail.html', service=service_obj, connections=connections, device_names=device_names,
+            endpoints_data=endpoints_data, ste=ServiceTypeEnum, sse=ServiceStatusEnum, ile=IsolationLevelEnum)
     except Exception as e:
         flash('The system encountered an error and cannot show the details of this service.', 'warning')
         current_app.logger.exception(e)
         return redirect(url_for('service.home'))
-    return render_template(
-        'service/detail.html', service=response, connections=connections, device_names=device_names,
-        endpoints_data=endpoints_data, ste=ServiceTypeEnum, sse=ServiceStatusEnum)
 
 
 @service.get('<path:service_uuid>/delete')
 def delete(service_uuid: str):
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
+    context_uuid = session['context_uuid']
 
     try:
         request = ServiceId()
         request.service_uuid.uuid = service_uuid
         request.context_id.context_uuid.uuid = context_uuid
         service_client.connect()
-        response = service_client.DeleteService(request)
+        service_client.DeleteService(request)
         service_client.close()
 
         flash('Service "{:s}" deleted successfully!'.format(service_uuid), 'success')
diff --git a/src/webui/service/slice/routes.py b/src/webui/service/slice/routes.py
index 222508418a187bcab18f7d44fccf896c917c6821..cd1b672d5c1014b0e8aa301ed7b5a1f6d910f6df 100644
--- a/src/webui/service/slice/routes.py
+++ b/src/webui/service/slice/routes.py
@@ -11,11 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+
 import grpc
 from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for
-from common.proto.context_pb2 import ContextId, Slice, SliceId, SliceStatusEnum
+from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum
+from common.tools.context_queries.Context import get_context
 from common.tools.context_queries.EndPoint import get_endpoint_names
+from common.tools.context_queries.Slice import get_slice
 from context.client.ContextClient import ContextClient
 from slice.client.SliceClient import SliceClient
 
@@ -26,92 +28,88 @@ slice_client = SliceClient()
 
 @slice.get('/')
 def home():
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-    request = ContextId()
-    request.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     context_client.connect()
-    try:
-        slice_list = context_client.ListSlices(request)
-        slices = slice_list.slices
-        context_found = True
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise
-        if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
-        slices = []
-        context_found = False
-
-    if context_found:
-        endpoint_ids = []
-        for slice_ in slices:
-            endpoint_ids.extend(slice_.slice_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+
+    context_obj = get_context(context_client, context_uuid, rw_copy=False)
+    if context_obj is None:
+        flash('Context({:s}) not found'.format(str(context_uuid)), 'danger')
+        slices, device_names, endpoints_data = list(), dict(), dict()
     else:
-        device_names, endpoints_data = [],[]
+        try:
+            slices = context_client.ListSlices(context_obj.context_id)
+            slices = slices.slices
+        except grpc.RpcError as e:
+            if e.code() != grpc.StatusCode.NOT_FOUND: raise
+            if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
+            slices, device_names, endpoints_data = list(), dict(), dict()
+        else:
+            endpoint_ids = list()
+            for slice_ in slices:
+                endpoint_ids.extend(slice_.slice_endpoint_ids)
+            device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
 
     context_client.close()
-
     return render_template(
         'slice/home.html', slices=slices, device_names=device_names, endpoints_data=endpoints_data,
-        context_not_found=not context_found, sse=SliceStatusEnum)
+        sse=SliceStatusEnum)
 
 
 @slice.route('add', methods=['GET', 'POST'])
 def add():
     flash('Add slice route called', 'danger')
     raise NotImplementedError()
-    return render_template('slice/home.html')
+    #return render_template('slice/home.html')
 
 
 @slice.get('<path:slice_uuid>/detail')
 def detail(slice_uuid: str):
-    context_uuid = session.get('context_uuid', '-')
-    if context_uuid == "-":
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
-    
-    request: SliceId = SliceId()
-    request.slice_uuid.uuid = slice_uuid
-    request.context_id.context_uuid.uuid = context_uuid
-    req = ContextId()
-    req.context_uuid.uuid = context_uuid
+    context_uuid = session['context_uuid']
+
     try:
         context_client.connect()
-        response: Slice = context_client.GetSlice(request)
-        services = context_client.ListServices(req)
 
-        endpoint_ids = []
-        endpoint_ids.extend(response.slice_endpoint_ids)
-        device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids)
+        slice_obj = get_slice(context_client, slice_uuid, rw_copy=False)
+        if slice_obj is None:
+            flash('Context({:s})/Slice({:s}) not found'.format(str(context_uuid), str(slice_uuid)), 'danger')
+            slice_obj, device_names, endpoints_data = Slice(), dict(), dict()
+        else:
+            device_names, endpoints_data = get_endpoint_names(context_client, slice_obj.slice_endpoint_ids)
 
         context_client.close()
+
+        return render_template(
+            'slice/detail.html', slice=slice_obj, device_names=device_names, endpoints_data=endpoints_data,
+            sse=SliceStatusEnum, ile=IsolationLevelEnum)
     except Exception as e:
         flash('The system encountered an error and cannot show the details of this slice.', 'warning')
         current_app.logger.exception(e)
         return redirect(url_for('slice.home'))
-    return render_template(
-        'slice/detail.html', slice=response, device_names=device_names, endpoints_data=endpoints_data,
-        sse=SliceStatusEnum, services=services)
-
-#@slice.get('<path:slice_uuid>/delete')
-#def delete(slice_uuid: str):
-#    context_uuid = session.get('context_uuid', '-')
-#    if context_uuid == "-":
-#        flash("Please select a context!", "warning")
-#        return redirect(url_for("main.home"))
-#
-#    try:
-#        request = SliceId()
-#        request.slice_uuid.uuid = slice_uuid
-#        request.context_id.context_uuid.uuid = context_uuid
-#        slice_client.connect()
-#        response = slice_client.DeleteSlice(request)
-#        slice_client.close()
-#
-#        flash('Slice "{:s}" deleted successfully!'.format(slice_uuid), 'success')
-#    except Exception as e:
-#        flash('Problem deleting slice "{:s}": {:s}'.format(slice_uuid, str(e.details())), 'danger')
-#        current_app.logger.exception(e) 
-#    return redirect(url_for('slice.home'))
+
+@slice.get('<path:slice_uuid>/delete')
+def delete(slice_uuid: str):
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
+        flash("Please select a context!", "warning")
+        return redirect(url_for("main.home"))
+    context_uuid = session['context_uuid']
+
+    try:
+        request = SliceId()
+        request.slice_uuid.uuid = slice_uuid
+        request.context_id.context_uuid.uuid = context_uuid
+        slice_client.connect()
+        slice_client.DeleteSlice(request)
+        slice_client.close()
+
+        flash('Slice "{:s}" deleted successfully!'.format(slice_uuid), 'success')
+    except Exception as e: # pylint: disable=broad-except
+        flash('Problem deleting slice "{:s}": {:s}'.format(slice_uuid, str(e)), 'danger')
+        current_app.logger.exception(e)
+    return redirect(url_for('slice.home'))
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 0aa022f1453eaa33a67212174cf9687a942b10f0..1dfa3687198d8a33db346ba2bbcd2989f6f109bb 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -86,10 +86,16 @@
                 <li class="nav-item">
                   <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
                 </li>
-  
                 <li class="nav-item">
                   <a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
                 </li>
+                <li class="nav-item">
+                  {% if '/load_gen/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('load_gen.home') }}">Load Generator</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('load_gen.home') }}">Load Generator</a>
+                  {% endif %}
+                </li>
   
                 <!-- <li class="nav-item">
                   <a class="nav-link" href="#">Context</a>
@@ -103,7 +109,7 @@
                 </li>
               </ul>
               <span class="navbar-text" style="color: #fff;">
-                Current Context(<b>{{ get_working_context() }}</b>)/Topology(<b>{{ get_working_topology() }}</b>)
+                Selected Context(<b>{{ get_working_context() }}</b>)/Topology(<b>{{ get_working_topology() }}</b>)
               </span>
             </div>
           </div>
diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html
index de8bb4a81da5e595f33297070697b528dff26ff4..1b4b43f5ad12956ae8bb2b1a843ce5e57ef29a2c 100644
--- a/src/webui/service/templates/device/detail.html
+++ b/src/webui/service/templates/device/detail.html
@@ -29,13 +29,14 @@
     <div class="col-sm-3">
         <a id="update" class="btn btn-secondary" href="{{ url_for('device.update',device_uuid=device.device_id.device_uuid.uuid) }}">
             <i class="bi bi-pencil-square"></i>
-            Update
+            Update device
         </a>
     </div>
     <div class="col-sm-3">
         <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete device</button> -->
         <button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
-            <i class="bi bi-x-square"></i>Delete device
+            <i class="bi bi-x-square"></i>
+            Delete device
         </button>
     </div>
 </div>
diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html
new file mode 100644
index 0000000000000000000000000000000000000000..d58f42601925ca438ab9d9f20b32f94960b5cada
--- /dev/null
+++ b/src/webui/service/templates/load_gen/home.html
@@ -0,0 +1,155 @@
+<!--
+ Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Load Generator</h1>
+    <br />
+
+    <form id="load_gen_form" method="POST" action="{{ form_action }}">
+        {{ form.hidden_tag() }}
+        <fieldset>
+            <div class="row mb-3">
+                {{ form.num_requests.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.num_requests.errors %}
+                        {{ form.num_requests(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.num_requests.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.num_requests(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.num_generated.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.num_generated.errors %}
+                        {{ form.num_generated(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.num_generated.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.num_generated(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-2 col-form-label">Service Types:</div>
+                <div class="col-sm-10">
+                    {{ form.request_type_slice_l2nm   }} {{ form.request_type_slice_l2nm  .label(class="col-sm-3 col-form-label") }}
+                    {{ form.request_type_slice_l3nm   }} {{ form.request_type_slice_l3nm  .label(class="col-sm-3 col-form-label") }}
+                    <br/>
+                    {{ form.request_type_service_l2nm }} {{ form.request_type_service_l2nm.label(class="col-sm-3 col-form-label") }}
+                    {{ form.request_type_service_l3nm }} {{ form.request_type_service_l3nm.label(class="col-sm-3 col-form-label") }}
+                    <br/>
+                    {{ form.request_type_service_mw   }} {{ form.request_type_service_mw  .label(class="col-sm-3 col-form-label") }}
+                    {{ form.request_type_service_tapi }} {{ form.request_type_service_tapi.label(class="col-sm-3 col-form-label") }}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.offered_load.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.offered_load.errors %}
+                        {{ form.offered_load(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.offered_load.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.offered_load(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.holding_time.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.holding_time.errors %}
+                        {{ form.holding_time(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.holding_time.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.holding_time(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.inter_arrival_time.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.inter_arrival_time.errors %}
+                        {{ form.inter_arrival_time(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.inter_arrival_time.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.inter_arrival_time(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-10">
+                    {{ form.do_teardown }} {{ form.do_teardown.label(class="col-sm-3 col-form-label") }}<br/>
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-2 col-form-label">DLT Settings:</div>
+                <div class="col-sm-10">
+                    {{ form.record_to_dlt }} {{ form.record_to_dlt.label(class="col-sm-3 col-form-label") }} <br/>
+                    {{ form.dlt_domain_id.label(class="col-sm-2 col-form-label") }}
+                    {% if form.dlt_domain_id.errors %}
+                        {{ form.dlt_domain_id(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.dlt_domain_id.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.dlt_domain_id(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-2 col-form-label">Status:</div>
+                <div class="col-sm-10">
+                    {{ form.infinite_loop }} {{ form.infinite_loop.label(class="col-sm-3 col-form-label") }}
+                    {{ form.running }} {{ form.running.label(class="col-sm-3 col-form-label") }}
+                </div>
+            </div>
+            <br />
+
+            <div class="d-grid gap-2 d-md-flex justify-content-md-start">
+                {{ form.submit(class="btn btn-primary") }}
+            </div>
+        </fieldset>
+    </form>
+
+{% endblock %}
diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html
index 11a868fdff9f5ee1bcbf22936ae0283d4ccc5715..eef42ae9a9f4cf386d26da0449681bab75f33b41 100644
--- a/src/webui/service/templates/main/debug.html
+++ b/src/webui/service/templates/main/debug.html
@@ -17,26 +17,12 @@
 {% extends 'base.html' %}
 
 {% block content %}
-    <h1>Debug</h1>
+    <h1>Debug API</h1>
 
-    <!--
-        <h3>Dump ContextDB:</h3>
-        <ul>
-            <li>
-                <a class="nav-link" href="/context/api/dump/html" id="context_html_link" target="context_html">
-                    as HTML
-                </a>
-            </li>
-            <li>
-                <a class="nav-link" href="/context/api/dump/text" id="context_text_link" target="context_text">
-                    as Text
-                </a>
-            </li>
-        </ul>
-    -->
-
-    <h3>Load Generator:</h3>
-    <a href="{{ url_for('load_gen.start') }}" class="btn btn-primary" style="margin-bottom: 10px;">Start</a>
-    <a href="{{ url_for('load_gen.stop') }}" class="btn btn-primary" style="margin-bottom: 10px;">Stop</a>
+    <ul>
+        <li><a class="nav-link" href="/restconf/debug-api/contexts" id="contexts_link" target="contexts">Contexts</a></li>
+        <li><a class="nav-link" href="/restconf/debug-api/devices" id="devices_link" target="devices">Devices</a></li>
+        <li><a class="nav-link" href="/restconf/debug-api/links" id="links_link" target="links">Links</a></li>
+    </ul>
 
 {% endblock %}
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index b2160695173064b9863834a4d42c60a69cc913ba..bee2e93c53896a8eeac826703a60afe02a5aa825 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -36,7 +36,8 @@
     <div class="col-sm-3">
         <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete service</button> -->
         <button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
-            <i class="bi bi-x-square"></i>Delete service
+            <i class="bi bi-x-square"></i>
+            Delete service
         </button>
     </div>
 </div>
@@ -87,7 +88,7 @@
     <thead>
         <tr>
             <th scope="col">Kind</th>
-            <th scope="col">Type</th>
+            <th scope="col">Key/Type</th>
             <th scope="col">Value</th>
         </tr>
     </thead>
@@ -135,15 +136,43 @@
             </td>
             <td>{{ constraint.endpoint_priority.priority }}</td>
         </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_capacity' %}
+        <tr>
+            <td>SLA Capacity</td>
+            <td>-</td>
+            <td>
+                {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps
+            </td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_latency' %}
+        <tr>
+            <td>SLA E2E Latency</td>
+            <td>-</td>
+            <td>
+                {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms
+            </td>
+        </tr>
         {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
         <tr>
             <td>SLA Availability</td>
             <td>-</td>
             <td>
+                {{ round(constraint.sla_availability.availability, ndigits=5) }} %;
                 {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                 {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
             </td>
         </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_isolation' %}
+        <tr>
+            <td>SLA Isolation</td>
+            <td>-</td>
+            <td>
+                {% for i, isolation_level in enumerate(constraint.sla_isolation.isolation_level) %}
+                    {% if i > 0 %}, {% endif %}
+                    {{ ile.Name(isolation_level) }}
+                {% endfor %}
+            </td>
+        </tr>
         {% else %}
         <tr>
             <td>-</td>
@@ -185,34 +214,12 @@
         {% endfor %}
     </tbody>
 </table>
-<!-- Modal -->
-<div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1"
-    aria-labelledby="staticBackdropLabel" aria-hidden="true">
-    <div class="modal-dialog">
-        <div class="modal-content">
-            <div class="modal-header">
-                <h5 class="modal-title" id="staticBackdropLabel">Delete service?</h5>
-                <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
-            </div>
-            <div class="modal-body">
-                Are you sure you want to delete the service "{{ service.service_id.service_uuid.uuid }}"?
-            </div>
-            <div class="modal-footer">
-                <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
-                <a type="button" class="btn btn-danger"
-                    href="{{ url_for('service.delete', service_uuid=service.service_id.service_uuid.uuid) }}"><i
-                        class="bi bi-exclamation-diamond"></i>Yes</a>
-            </div>
-        </div>
-    </div>
-</div>
-
 
 <table class="table table-striped table-hover">
     <thead>
         <tr>
             <th scope="col">Connection Id</th>
-            <th scope="col">Sub-service</th>
+            <th scope="col">Sub-Service</th>
             <th scope="col">Path</th>
         </tr>
     </thead>
@@ -258,8 +265,26 @@
     </tbody>
 </table>
 
+<!-- Modal -->
+<div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1"
+    aria-labelledby="staticBackdropLabel" aria-hidden="true">
+    <div class="modal-dialog">
+        <div class="modal-content">
+            <div class="modal-header">
+                <h5 class="modal-title" id="staticBackdropLabel">Delete service?</h5>
+                <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
+            </div>
+            <div class="modal-body">
+                Are you sure you want to delete the service "{{ service.service_id.service_uuid.uuid }}"?
+            </div>
+            <div class="modal-footer">
+                <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
+                <a type="button" class="btn btn-danger"
+                    href="{{ url_for('service.delete', service_uuid=service.service_id.service_uuid.uuid) }}"><i
+                        class="bi bi-exclamation-diamond"></i>Yes</a>
+            </div>
+        </div>
+    </div>
+</div>
 
-
-
-
-{% endblock %}
\ No newline at end of file
+{% endblock %}
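
Note that round(...), enumerate(...) and ile.Name(...) in these templates are not Jinja2 built-ins; they render only because the WebUI exposes them as template globals. A minimal sketch of that registration, assuming a Flask app and the generated IsolationLevelEnum (the registration point and import path are assumptions):

    from flask import Flask
    from common.proto.context_pb2 import IsolationLevelEnum  # assumed import path

    app = Flask(__name__)
    app.jinja_env.globals.update(
        round=round,             # enables {{ round(value, ndigits=2) }}
        enumerate=enumerate,     # enables {% for i, x in enumerate(items) %}
        ile=IsolationLevelEnum,  # enables {{ ile.Name(level) }} -> enum value name
    )
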
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 390f882d7058b825ecf9d2bce5689585f99b80aa..8f223e44deda37b177a360a51b1e366f680fac27 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -32,14 +32,14 @@
             <i class="bi bi-pencil-square"></i>
             Update
         </a>
-    </div>
-    <div class="col-sm-3">-->
+    </div>-->
+    <div class="col-sm-3">
         <!-- <button type="button" class="btn btn-danger"><i class="bi bi-x-square"></i>Delete slice</button> -->
-        <!--<button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
-            <i class="bi bi-x-square"></i>Delete slice
+        <button type="button" class="btn btn-danger" data-bs-toggle="modal" data-bs-target="#deleteModal">
+            <i class="bi bi-x-square"></i>
+            Delete slice
         </button>
     </div>
-    -->
 </div>
 
 <div class="row mb-3">
@@ -88,7 +88,7 @@
     <thead>
         <tr>
             <th scope="col">Kind</th>
-            <th scope="col">Type</th>
+            <th scope="col">Key/Type</th>
             <th scope="col">Value</th>
         </tr>
     </thead>
@@ -136,15 +136,43 @@
             </td>
             <td>{{ constraint.endpoint_priority.priority }}</td>
         </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_capacity' %}
+        <tr>
+            <td>SLA Capacity</td>
+            <td>-</td>
+            <td>
+                {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps
+            </td>
+        </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_latency' %}
+        <tr>
+            <td>SLA E2E Latency</td>
+            <td>-</td>
+            <td>
+                {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms
+            </td>
+        </tr>
         {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
         <tr>
             <td>SLA Availability</td>
             <td>-</td>
             <td>
+                {{ round(constraint.sla_availability.availability, ndigits=5) }} %;
                 {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                 {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
             </td>
         </tr>
+        {% elif constraint.WhichOneof('constraint')=='sla_isolation' %}
+        <tr>
+            <td>SLA Isolation</td>
+            <td>-</td>
+            <td>
+                {% for i, isolation_level in enumerate(constraint.sla_isolation.isolation_level) %}
+                    {% if i > 0 %}, {% endif %}
+                    {{ ile.Name(isolation_level) }}
+                {% endfor %}
+            </td>
+        </tr>
         {% else %}
         <tr>
             <td>-</td>
@@ -191,7 +219,7 @@
         <table class="table table-striped table-hover">
             <thead>
                 <tr>
-                    <th scope="col">Service Id</th>
+                    <th scope="col">Sub-Services</th>
                 </tr>
             </thead>
             <tbody>
@@ -219,7 +247,7 @@
         <table class="table table-striped table-hover">
             <thead>
                 <tr>
-                    <th scope="col">Sub-slices</th>
+                    <th scope="col">Sub-Slices</th>
                 </tr>
             </thead>
             <tbody>
@@ -244,4 +272,27 @@
         </table>
     </div>
 </div>
-{% endblock %}
\ No newline at end of file
+
+<!-- Modal -->
+<div class="modal fade" id="deleteModal" data-bs-backdrop="static" data-bs-keyboard="false" tabindex="-1"
+    aria-labelledby="staticBackdropLabel" aria-hidden="true">
+    <div class="modal-dialog">
+        <div class="modal-content">
+            <div class="modal-header">
+                <h5 class="modal-title" id="staticBackdropLabel">Delete slice?</h5>
+                <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
+            </div>
+            <div class="modal-body">
+                Are you sure you want to delete the slice "{{ slice.slice_id.slice_uuid.uuid }}"?
+            </div>
+            <div class="modal-footer">
+                <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">No</button>
+                <a type="button" class="btn btn-danger"
+                    href="{{ url_for('slice.delete', slice_uuid=slice.slice_id.slice_uuid.uuid) }}"><i
+                        class="bi bi-exclamation-diamond"></i>Yes</a>
+            </div>
+        </div>
+    </div>
+</div>
+
+{% endblock %}
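
The confirmation modal's "Yes" link resolves through url_for('slice.delete', slice_uuid=...), so a matching endpoint must exist on the slice blueprint. A hypothetical sketch of such a handler (blueprint wiring, handler body, and redirect target are assumptions, not part of this change):

    from flask import Blueprint, flash, redirect, url_for

    slice_bp = Blueprint('slice', __name__, url_prefix='/slice')

    @slice_bp.get('/delete/<path:slice_uuid>')
    def delete(slice_uuid):
        # ... request removal of the slice from the backend services (elided) ...
        flash(f'Slice "{slice_uuid}" removed.', 'success')
        return redirect(url_for('slice.home'))  # listing endpoint name is assumed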