diff --git a/deploy/all.sh b/deploy/all.sh
index 09239afed7ba036b214742e636017a58c072f6b3..6f5592cb43a5f214b2536226bb857629ad0c3cf0 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -110,10 +110,14 @@ export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 # If not already set, set the table name to be used by Monitoring for KPIs.
 export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
 
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
+
 # If not already set, disable flag for dropping tables if they exist.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variable
-# QDB_TABLE_MONITORING_KPIS will be dropped while checking/deploying QuestDB.
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while
+# checking/deploying QuestDB.
 export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
 # If not already set, disable flag for re-deploying QuestDB from scratch.
diff --git a/deploy/qdb.sh b/deploy/qdb.sh
index a654088049df871fac0f4d19c225b2246f464f8e..d94c000bf8d40c72faa255e7c6554926b6f683d3 100755
--- a/deploy/qdb.sh
+++ b/deploy/qdb.sh
@@ -30,10 +30,14 @@ export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 # If not already set, set the table name to be used by Monitoring for KPIs.
 export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
 
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
+
 # If not already set, disable flag for dropping tables if they exist.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variable
-# QDB_TABLE_MONITORING_KPIS will be dropped while checking/deploying QuestDB.
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped
+# while checking/deploying QuestDB.
 export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
 # If not already set, disable flag for re-deploying QuestDB from scratch.
@@ -151,6 +155,8 @@ function qdb_drop_tables() {
     echo "Drop tables, if exist"
     curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;"
     echo
+    curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;"
+    echo
 }
 
 if [ "$QDB_REDEPLOY" == "YES" ]; then
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 2bacc8cacb18c3cba10247472798dc0644aab2bf..16cf5c13bd4532aac0267b7904c6c403d7ac057c 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -81,6 +81,9 @@ export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
 # If not already set, set the table name to be used by Monitoring for KPIs.
 export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
 
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
+
 
 ########################################################################################################################
 # Automated steps start here
@@ -131,6 +134,7 @@ kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='
     --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \
     --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \
     --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \
+    --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \
     --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \
     --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD}
 printf "\n"
@@ -321,11 +325,17 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
         "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
     }' ${GRAFANA_URL_DEFAULT}/api/user/password
     echo
+    echo
 
     # Updated Grafana API URL
     GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}"
     echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
 
+    echo ">> Installing Scatter Plot plugin..."
+    curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \
+        ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install
+    echo
+
     # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
     QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}"
     echo ">> Creating datasources..."
@@ -354,17 +364,51 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     }' ${GRAFANA_URL_UPDATED}/api/datasources
     echo
 
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-slc-grp",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_SLICE_GROUPS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
+
     echo ">> Creating dashboards..."
     # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
     curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
         ${GRAFANA_URL_UPDATED}/api/dashboards/db
     echo
 
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    printf "\n\n"
+
     echo ">> Staring dashboards..."
     DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
     DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
 
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
     printf "\n\n"
 fi
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 447f6a1c77cc6862db3df3e83b73add3257a5c0d..e5757874b7e241d7c6b0bd050ac2aae47a1610e3 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -37,6 +37,11 @@ spec:
         env:
         - name: LOG_LEVEL
           value: "INFO"
+        - name: SLICE_GROUPING
+          value: "ENABLE"
+        envFrom:
+        - secretRef:
+            name: qdb-data
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:4040"]
diff --git a/my_deploy.sh b/my_deploy.sh
index 1efea75bb3fb008e4a54d42135436a7373fd926e..6a360812b41251b50758da4556a7e360a614d3e0 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -57,7 +57,7 @@ export CRDB_DATABASE="tfs"
 export CRDB_DEPLOY_MODE="single"
 
 # Disable flag for dropping database, if it exists.
-export CRDB_DROP_DATABASE_IF_EXISTS=""
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
 
 # Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""
@@ -86,8 +86,11 @@ export QDB_PASSWORD="quest"
 # Set the table name to be used by Monitoring for KPIs.
 export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
 
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
 # Disable flag for dropping tables if they exist.
-export QDB_DROP_TABLES_IF_EXIST=""
+export QDB_DROP_TABLES_IF_EXIST="YES"
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
diff --git a/proto/context.proto b/proto/context.proto
index e403c4a22f2df62f695041c094cc1c6e6a193d5f..49d16229cdac5de84f25cfaa7d196d25184f46f0 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -509,6 +509,7 @@ message Constraint_SLA_Capacity {
 message Constraint_SLA_Availability {
   uint32 num_disjoint_paths = 1;
   bool all_active = 2;
+  float availability = 3; // 0.0 .. 100.0 percentage of availability
 }
 
 enum IsolationLevelEnum {
diff --git a/proto/load_generator.proto b/proto/load_generator.proto
index 98f6eefda88db7abac4651857326952789a879ba..86f9469588f1586da5339edad198e39e82598cde 100644
--- a/proto/load_generator.proto
+++ b/proto/load_generator.proto
@@ -18,6 +18,36 @@ package load_generator;
 import "context.proto";
 
 service LoadGeneratorService {
-  rpc Start(context.Empty) returns (context.Empty) {}
-  rpc Stop (context.Empty) returns (context.Empty) {}
+  rpc Start    (Parameters   ) returns (context.Empty) {}
+  rpc GetStatus(context.Empty) returns (Status       ) {}
+  rpc Stop     (context.Empty) returns (context.Empty) {}
+}
+
+enum RequestTypeEnum {
+  REQUESTTYPE_UNDEFINED    = 0;
+  REQUESTTYPE_SERVICE_L2NM = 1;
+  REQUESTTYPE_SERVICE_L3NM = 2;
+  REQUESTTYPE_SERVICE_MW   = 3;
+  REQUESTTYPE_SERVICE_TAPI = 4;
+  REQUESTTYPE_SLICE_L2NM   = 5;
+  REQUESTTYPE_SLICE_L3NM   = 6;
+}
+
+message Parameters {
+  uint64 num_requests = 1;  // if == 0, generate infinite requests
+  repeated RequestTypeEnum request_types = 2;
+  float offered_load = 3;
+  float holding_time = 4;
+  float inter_arrival_time = 5;
+  bool do_teardown = 6;
+  bool dry_mode = 7;
+  bool record_to_dlt = 8;
+  string dlt_domain_id = 9;
+}
+
+message Status {
+  Parameters parameters = 1;
+  uint64 num_generated = 2;
+  bool infinite_loop = 3;
+  bool running = 4;
 }
diff --git a/src/load_generator/client/LoadGeneratorClient.py b/src/load_generator/client/LoadGeneratorClient.py
index 99626bbbb59671af41c11054d34338194f42a6af..2bed40dfdfe13d2920166bcb56237fe84bff8789 100644
--- a/src/load_generator/client/LoadGeneratorClient.py
+++ b/src/load_generator/client/LoadGeneratorClient.py
@@ -16,6 +16,7 @@ import grpc, logging
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_host, get_service_port_grpc
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, Status
 from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub
 from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -46,12 +47,19 @@ class LoadGeneratorClient:
         self.stub = None
 
     @RETRY_DECORATOR
-    def Start(self, request : Empty) -> Empty:
+    def Start(self, request : Parameters) -> Empty:
         LOGGER.debug('Start request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.Start(request)
         LOGGER.debug('Start result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
+    @RETRY_DECORATOR
+    def GetStatus(self, request : Empty) -> Status:
+        LOGGER.debug('GetStatus request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetStatus(request)
+        LOGGER.debug('GetStatus result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
     @RETRY_DECORATOR
     def Stop(self, request : Empty) -> Empty:
         LOGGER.debug('Stop request: {:s}'.format(grpc_message_to_json_string(request)))
diff --git a/src/load_generator/service/Constants.py b/src/load_generator/service/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c339877c70363e874df278d6b5d29cc47a3be0f
--- /dev/null
+++ b/src/load_generator/service/Constants.py
@@ -0,0 +1,27 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.load_generator_pb2 import RequestTypeEnum
+from load_generator.load_gen.Constants import RequestType
+
+REQUEST_TYPE_MAP = {
+    RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM : RequestType.SERVICE_L2NM,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM : RequestType.SERVICE_L3NM,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_MW   : RequestType.SERVICE_MW,
+    RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI : RequestType.SERVICE_TAPI,
+    RequestTypeEnum.REQUESTTYPE_SLICE_L2NM   : RequestType.SLICE_L2NM,
+    RequestTypeEnum.REQUESTTYPE_SLICE_L3NM   : RequestType.SLICE_L3NM,
+}
+
+REQUEST_TYPE_REVERSE_MAP = {v:k for k,v in REQUEST_TYPE_MAP.items()}
diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
index c280581ddfab488249ff249e60118ec3030e0447..d66b0b2c10c5228e0c3d15759fc46b2c0770154d 100644
--- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
+++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
@@ -12,43 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Optional
 import grpc, logging
+from typing import Optional
 from apscheduler.schedulers.background import BackgroundScheduler
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, Status
 from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer
-from load_generator.load_gen.Constants import RequestType
-from load_generator.load_gen.Parameters import Parameters
+from load_generator.load_gen.Parameters import Parameters as LoadGen_Parameters
 from load_generator.load_gen.RequestGenerator import RequestGenerator
 from load_generator.load_gen.RequestScheduler import RequestScheduler
+from .Constants import REQUEST_TYPE_MAP, REQUEST_TYPE_REVERSE_MAP
 
 LOGGER = logging.getLogger(__name__)
 
 class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
-        self._parameters = Parameters(
-            num_requests = 100,
-            request_types = [
-                RequestType.SERVICE_L2NM,
-                RequestType.SERVICE_L3NM,
-                #RequestType.SERVICE_MW,
-                #RequestType.SERVICE_TAPI,
-                RequestType.SLICE_L2NM,
-                RequestType.SLICE_L3NM,
-            ],
-            offered_load  = 50,
-            holding_time  = 10,
-            do_teardown   = True,
-            dry_mode      = False,           # in dry mode, no request is sent to TeraFlowSDN
-            record_to_dlt = False,           # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT
-            dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False
-        )
         self._generator : Optional[RequestGenerator] = None
         self._scheduler : Optional[RequestScheduler] = None
         LOGGER.debug('Servicer Created')
 
-    def Start(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+    def Start(self, request : Parameters, context : grpc.ServicerContext) -> Empty:
+        self._parameters = LoadGen_Parameters(
+            num_requests       = request.num_requests,
+            request_types      = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
+            offered_load       = request.offered_load if request.offered_load > 1.e-12 else None,
+            holding_time       = request.holding_time if request.holding_time > 1.e-12 else None,
+            inter_arrival_time = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
+            do_teardown        = request.do_teardown,   # if set, schedule tear down of requests
+            dry_mode           = request.dry_mode,      # in dry mode, no request is sent to TeraFlowSDN
+            record_to_dlt      = request.record_to_dlt, # if set, upload changes to DLT
+            dlt_domain_id      = request.dlt_domain_id, # domain used to upload entities (when record_to_dlt = True)
+        )
+
         LOGGER.info('Initializing Generator...')
         self._generator = RequestGenerator(self._parameters)
         self._generator.initialize()
@@ -58,6 +54,33 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
         self._scheduler.start()
         return Empty()
 
+    def GetStatus(self, request : Empty, context : grpc.ServicerContext) -> Status:
+        if self._scheduler is None:
+            # not started
+            status = Status()
+            status.num_generated = 0
+            status.infinite_loop = False
+            status.running       = False
+            return status
+
+        params = self._scheduler._parameters
+        request_types = [REQUEST_TYPE_REVERSE_MAP[rt] for rt in params.request_types]
+
+        status = Status()
+        status.num_generated = self._scheduler.num_generated
+        status.infinite_loop = self._scheduler.infinite_loop
+        status.running       = self._scheduler.running
+        status.parameters.num_requests       = params.num_requests          # pylint: disable=no-member
+        status.parameters.offered_load       = params.offered_load          # pylint: disable=no-member
+        status.parameters.holding_time       = params.holding_time          # pylint: disable=no-member
+        status.parameters.inter_arrival_time = params.inter_arrival_time    # pylint: disable=no-member
+        status.parameters.do_teardown        = params.do_teardown           # pylint: disable=no-member
+        status.parameters.dry_mode           = params.dry_mode              # pylint: disable=no-member
+        status.parameters.record_to_dlt      = params.record_to_dlt         # pylint: disable=no-member
+        status.parameters.dlt_domain_id      = params.dlt_domain_id         # pylint: disable=no-member
+        status.parameters.request_types.extend(request_types)               # pylint: disable=no-member
+        return status
+
     def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty:
         if self._scheduler is not None:
             self._scheduler.stop()
diff --git a/src/slice/client/SliceClient.py b/src/slice/client/SliceClient.py
index a3e5d649032bbf939f9ba6d812b270ca3384cc06..792a2037f0a7cb47d6f0c2e7969708425b57b3a6 100644
--- a/src/slice/client/SliceClient.py
+++ b/src/slice/client/SliceClient.py
@@ -65,3 +65,17 @@ class SliceClient:
         response = self.stub.DeleteSlice(request)
         LOGGER.debug('DeleteSlice result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
+
+    @RETRY_DECORATOR
+    def OrderSliceWithSLA(self, request : Slice) -> SliceId:
+        LOGGER.debug('OrderSliceWithSLA request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.OrderSliceWithSLA(request)
+        LOGGER.debug('OrderSliceWithSLA result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RunSliceGrouping(self, request : Empty) -> Empty:
+        LOGGER.debug('RunSliceGrouping request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RunSliceGrouping(request)
+        LOGGER.debug('RunSliceGrouping result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/slice/requirements.in b/src/slice/requirements.in
index daef740da4729659fb3117eadff31994acdf5746..854c71a5948e91077fba4561f961083ed90b0861 100644
--- a/src/slice/requirements.in
+++ b/src/slice/requirements.in
@@ -12,5 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 #deepdiff==5.8.*
+numpy==1.23.*
+pandas==1.5.*
+questdb==1.0.1
+requests==2.27.*
+scikit-learn==1.1.*
diff --git a/src/slice/service/README.md b/src/slice/service/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..696b4a6e099cfc8463db6f93e5940cbc1d9c32e1
--- /dev/null
+++ b/src/slice/service/README.md
@@ -0,0 +1,38 @@
+# SLICE GROUPING details
+
+## Description
+- Similar slice requests can share underlying services.
+- Clustering algorithm for slice grouping.
+- Consider both paths and SLA constraints.
+- SLA monitored by slice group.
+
+## TFS Target Objective
+- Objective 3.2: Provisioning of multi-tenant transport network slices.
+- Improve network resource usage by 30% by adopting multi-tenancy resource allocation algorithms.
+- Optimal slice grouping: trade-offs between economies of scale and limitations as to which SLAs can be grouped together need to be considered.
+- Optimal grouping of slices is required to maximise KPIs, such as resource utilisation, utility of the connectivity, and energy efficiency.
+- In this context, trade-offs between the resulting control plane complexity and differential treatment of SLA classes should be considered.
+
+## New Requirements
+- User can select if slice grouping is performed per-slice request.
+- Slice grouping introduces a clustering algorithm for finding service optimisation while preserving slice SLA.
+- Service (re-)optimisation is provided.
+
+## TFS Architecture Update
+- Update Slice service RPC to include Slice Grouping.
+- Use novel Slice model with SLA constraints.
+- Use Policy Component with action to update services to apply slice grouping.
+- Describe Slice service operation modes: per-request or user-triggered.
+
+    OSS/BSS --> Slice   : Create Slice with SLA (slice)
+    Slice   --> Slice   : Slice Grouping (slice)
+alt [slice can be grouped to other slice services]
+    // do nothing and return existing slice
+else [slice needs new services]
+    Slice   --> ... : normal logic
+end alt
+    Slice   --> OSS/BSS : slice
+
+slice.proto:
+  rpc OrderSliceWithSLA(context.Slice) returns (context.SliceId) {} // If slice with SLA already exists, returns slice. If not, it creates it.
+  rpc RunSliceGrouping (context.Empty) returns (context.Empty) {} // Optimizes the underlying services and re-maps them to the requested slices.
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index d49f1c547e473a9999bed06508f249cb9afcf275..717127a0048a5bb20a0f1689268f7029f7cf0438 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -28,6 +28,7 @@ from common.tools.grpc.ServiceIds import update_service_ids
 from context.client.ContextClient import ContextClient
 from interdomain.client.InterdomainClient import InterdomainClient
 from service.client.ServiceClient import ServiceClient
+from .slice_grouper.SliceGrouper import SliceGrouper
 
 LOGGER = logging.getLogger(__name__)
 
@@ -36,6 +37,7 @@ METRICS_POOL = MetricsPool('Slice', 'RPC')
 class SliceServiceServicerImpl(SliceServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
+        self._slice_grouper = SliceGrouper()
         LOGGER.debug('Servicer Created')
 
     def create_update(self, request : Slice) -> SliceId:
@@ -82,6 +84,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             context_client.SetSlice(slice_active)
             return slice_id
 
+        if self._slice_grouper.is_enabled:
+            grouped = self._slice_grouper.group(slice_with_uuids)
+
         # Local domain slice
         service_id = ServiceId()
         # pylint: disable=no-member
@@ -202,6 +207,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             current_slice.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member
             context_client.SetSlice(current_slice)
 
+            if self._slice_grouper.is_enabled:
+                ungrouped = self._slice_grouper.ungroup(current_slice)
+
             service_client = ServiceClient()
             for service_id in _slice.slice_service_ids:
                 current_slice = Slice()
diff --git a/src/slice/service/slice_grouper/Constants.py b/src/slice/service/slice_grouper/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..2edd853a2202fc64f107ea8c6688d19d6ab2692e
--- /dev/null
+++ b/src/slice/service/slice_grouper/Constants.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: define by means of settings
+SLICE_GROUPS = [
+    ('bronze',   10.0,  10.0), # Bronze   (10%, 10Gb/s)
+    ('silver',   30.0,  40.0), # Silver   (30%, 40Gb/s)
+    ('gold',     70.0,  50.0), # Gold     (70%, 50Gb/s)
+    ('platinum', 99.0, 100.0), # Platinum (99%, 100Gb/s)
+]
+SLICE_GROUP_NAMES = {slice_group[0] for slice_group in SLICE_GROUPS}
diff --git a/src/slice/service/slice_grouper/MetricsExporter.py b/src/slice/service/slice_grouper/MetricsExporter.py
new file mode 100644
index 0000000000000000000000000000000000000000..3708641eef64e100fae18e875a4fbc4896357057
--- /dev/null
+++ b/src/slice/service/slice_grouper/MetricsExporter.py
@@ -0,0 +1,126 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, os, requests
+from typing import Any, Literal, Union
+from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module
+
+LOGGER = logging.getLogger(__name__)
+
+MAX_RETRIES = 10
+DELAY_RETRIES = 0.5
+
+MSG_EXPORT_EXECUTED   = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed'
+MSG_EXPORT_FAILED     = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...'
+MSG_REST_BAD_STATUS   = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}'
+MSG_REST_EXECUTED     = '[rest_request] Query({:s}) executed, result: {:s}'
+MSG_REST_FAILED       = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...'
+MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}'
+
+METRICSDB_HOSTNAME  = os.environ.get('METRICSDB_HOSTNAME')
+METRICSDB_ILP_PORT  = int(os.environ.get('METRICSDB_ILP_PORT'))
+METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT'))
+METRICSDB_TABLE_SLICE_GROUPS = os.environ.get('METRICSDB_TABLE_SLICE_GROUPS')
+
+COLORS = {
+    'platinum': '#E5E4E2',
+    'gold'    : '#FFD700',
+    'silver'  : '#808080', # NOTE(review): #808080 is gray; silver is typically #C0C0C0 — confirm intended
+    'bronze'  : '#CD7F32',
+}
+DEFAULT_COLOR = '#000000' # black
+
+SQL_MARK_DELETED = "UPDATE {:s} SET is_deleted='true' WHERE slice_uuid='{:s}';"
+
+class MetricsExporter():
+    def create_table(self) -> None:
+        sql_query = ' '.join([
+            'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)),
+            ','.join([
+                'timestamp TIMESTAMP',
+                'slice_uuid SYMBOL',
+                'slice_group SYMBOL',
+                'slice_color SYMBOL',
+                'is_deleted SYMBOL',
+                'slice_availability DOUBLE',
+                'slice_capacity_center DOUBLE',
+                'slice_capacity DOUBLE',
+            ]),
+            ') TIMESTAMP(timestamp);'
+        ])
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS)))
+        except Exception as e:
+            LOGGER.warning('Table {:s} cannot be created. {:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e)))
+            raise
+
+    def export_point(
+        self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float,
+        is_center : bool = False
+    ) -> None:
+        dt_timestamp = datetime.datetime.utcnow()
+        slice_color = COLORS.get(slice_group, DEFAULT_COLOR)
+        symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color, is_deleted='false')
+        columns = dict(slice_availability=slice_availability)
+        columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity
+
+        for retry in range(MAX_RETRIES):
+            try:
+                with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender:
+                    sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp)
+                    sender.flush()
+                LOGGER.debug(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns)))
+                return
+            except (Exception, IngressError): # pylint: disable=broad-except
+                LOGGER.exception(MSG_EXPORT_FAILED.format(
+                    str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
+
+    def delete_point(self, slice_uuid : str) -> None:
+        sql_query = SQL_MARK_DELETED.format(str(METRICSDB_TABLE_SLICE_GROUPS), slice_uuid)
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.debug('Point {:s} deleted'.format(str(slice_uuid)))
+        except Exception as e:
+            LOGGER.warning('Point {:s} cannot be deleted. {:s}'.format(str(slice_uuid), str(e)))
+            raise
+
+    def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]:
+        url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT)
+        params = {'query': rest_query, 'fmt': 'json'}
+
+        for retry in range(MAX_RETRIES):
+            try:
+                response = requests.get(url, params=params)
+                status_code = response.status_code
+                if status_code not in {200}:
+                    str_content = response.content.decode('UTF-8')
+                    raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content))
+
+                json_response = response.json()
+                if 'ddl' in json_response:
+                    LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl'])))
+                    return True
+                elif 'dataset' in json_response:
+                    LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset'])))
+                    return json_response['dataset']
+
+            except Exception: # pylint: disable=broad-except
+                LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
diff --git a/src/slice/service/slice_grouper/SliceGrouper.py b/src/slice/service/slice_grouper/SliceGrouper.py
new file mode 100644
index 0000000000000000000000000000000000000000..735d028993eb11e83138caebde1e32ebc830093f
--- /dev/null
+++ b/src/slice/service/slice_grouper/SliceGrouper.py
@@ -0,0 +1,94 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pandas, threading
+from typing import Dict, Optional, Tuple
+from sklearn.cluster import KMeans
+from common.proto.context_pb2 import Slice
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from .Constants import SLICE_GROUPS
+from .MetricsExporter import MetricsExporter
+from .Tools import (
+    add_slice_to_group, create_slice_groups, get_slice_grouping_parameters, is_slice_grouping_enabled,
+    remove_slice_from_group)
+
+LOGGER = logging.getLogger(__name__)
+
+class SliceGrouper:
+    def __init__(self) -> None:
+        self._lock = threading.Lock()
+        self._is_enabled = is_slice_grouping_enabled()
+        if not self._is_enabled: return
+
+        metrics_exporter = MetricsExporter()
+        metrics_exporter.create_table()
+
+        self._slice_groups = create_slice_groups(SLICE_GROUPS)
+
+        # Initialize and fit K-Means with the pre-defined clusters we want, i.e., one per slice group
+        df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps'])
+        k_means = KMeans(n_clusters=df_groups.shape[0])
+        k_means.fit(df_groups[['availability', 'capacity_gbps']])
+        df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])
+        self._k_means = k_means
+        self._df_groups = df_groups
+
+        self._group_mapping : Dict[str, Dict] = {
+            group['name']:{k:v for k,v in group.items() if k != 'name'}
+            for group in list(df_groups.to_dict('records'))
+        }
+
+        label_to_group = {}
+        for group_name,group_attrs in self._group_mapping.items():
+            label = group_attrs['label']
+            availability = group_attrs['availability']
+            capacity_gbps = group_attrs['capacity_gbps']
+            metrics_exporter.export_point(
+                group_name, group_name, availability, capacity_gbps, is_center=True)
+            label_to_group[label] = group_name
+        self._label_to_group = label_to_group
+
+    def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]:
+        with self._lock:
+            grouping_parameters = get_slice_grouping_parameters(slice_obj)
+            LOGGER.debug('[_select_group] grouping_parameters={:s}'.format(str(grouping_parameters)))
+            if grouping_parameters is None: return None
+
+            sample = pandas.DataFrame([grouping_parameters], columns=['availability', 'capacity_gbps'])
+            sample['label'] = self._k_means.predict(sample)
+            sample = sample.to_dict('records')[0]   # pylint: disable=unsubscriptable-object
+            LOGGER.debug('[_select_group] sample={:s}'.format(str(sample)))
+            label = sample['label']
+            availability = sample['availability']
+            capacity_gbps = sample['capacity_gbps']
+            group_name = self._label_to_group[label]
+            LOGGER.debug('[_select_group] group_name={:s}'.format(str(group_name)))
+            return group_name, availability, capacity_gbps
+
+    @property
+    def is_enabled(self): return self._is_enabled
+
+    def group(self, slice_obj : Slice) -> bool:
+        LOGGER.debug('[group] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
+        selected_group = self._select_group(slice_obj)
+        LOGGER.debug('[group] selected_group={:s}'.format(str(selected_group)))
+        if selected_group is None: return False
+        return add_slice_to_group(slice_obj, selected_group)
+
+    def ungroup(self, slice_obj : Slice) -> bool:
+        LOGGER.debug('[ungroup] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
+        selected_group = self._select_group(slice_obj)
+        LOGGER.debug('[ungroup] selected_group={:s}'.format(str(selected_group)))
+        if selected_group is None: return False
+        return remove_slice_from_group(slice_obj, selected_group)
diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..12337cf8ee02656439e6c4284358c995afe1078a
--- /dev/null
+++ b/src/slice/service/slice_grouper/Tools.py
@@ -0,0 +1,153 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List, Optional, Set, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.Settings import get_setting
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum
+from common.tools.context_queries.Context import create_context
+from common.tools.context_queries.Slice import get_slice
+from context.client.ContextClient import ContextClient
+from slice.service.slice_grouper.MetricsExporter import MetricsExporter
+
+SETTING_NAME_SLICE_GROUPING = 'SLICE_GROUPING'
+TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'}
+
+NO_ISOLATION = IsolationLevelEnum.NO_ISOLATION
+
+def is_slice_grouping_enabled() -> bool:
+    is_enabled = get_setting(SETTING_NAME_SLICE_GROUPING, default=None)
+    if is_enabled is None: return False
+    str_is_enabled = str(is_enabled).upper()
+    return str_is_enabled in TRUE_VALUES
+
+def create_slice_group(
+    context_uuid : str, slice_name : str, capacity_gbps : float, availability : float
+) -> Slice:
+    slice_group_obj = Slice()
+    slice_group_obj.slice_id.context_id.context_uuid.uuid = context_uuid            # pylint: disable=no-member
+    slice_group_obj.slice_id.slice_uuid.uuid = slice_name                           # pylint: disable=no-member
+    slice_group_obj.name = slice_name
+    slice_group_obj.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE  # pylint: disable=no-member
+    #del slice_group_obj.slice_endpoint_ids[:] # no endpoints initially
+    #del slice_group_obj.slice_service_ids[:] # no sub-services
+    #del slice_group_obj.slice_subslice_ids[:] # no sub-slices
+    #del slice_group_obj.slice_config.config_rules[:] # no config rules
+    slice_group_obj.slice_owner.owner_uuid.uuid = 'TeraFlowSDN'                     # pylint: disable=no-member
+    slice_group_obj.slice_owner.owner_string = 'TeraFlowSDN'                        # pylint: disable=no-member
+
+    constraint_sla_capacity = slice_group_obj.slice_constraints.add()               # pylint: disable=no-member
+    constraint_sla_capacity.sla_capacity.capacity_gbps = capacity_gbps
+
+    constraint_sla_availability = slice_group_obj.slice_constraints.add()           # pylint: disable=no-member
+    constraint_sla_availability.sla_availability.num_disjoint_paths = 1
+    constraint_sla_availability.sla_availability.all_active = True
+    constraint_sla_availability.sla_availability.availability = availability
+
+    constraint_sla_isolation = slice_group_obj.slice_constraints.add()              # pylint: disable=no-member
+    constraint_sla_isolation.sla_isolation.isolation_level.append(NO_ISOLATION)
+
+    return slice_group_obj
+
+def create_slice_groups(
+    slice_groups : List[Tuple[str, float, float]], context_uuid : str = DEFAULT_CONTEXT_NAME
+) -> Dict[str, SliceId]:
+    context_client = ContextClient()
+    create_context(context_client, context_uuid)
+
+    slice_group_ids : Dict[str, SliceId] = dict()
+    for slice_group in slice_groups:
+        slice_group_name = slice_group[0]
+        slice_group_obj = get_slice(context_client, slice_group_name, context_uuid)
+        if slice_group_obj is None:
+            slice_group_obj = create_slice_group(
+                context_uuid, slice_group_name, slice_group[2], slice_group[1])
+            slice_group_id = context_client.SetSlice(slice_group_obj)
+            slice_group_ids[slice_group_name] = slice_group_id
+        else:
+            slice_group_ids[slice_group_name] = slice_group_obj.slice_id
+
+    return slice_group_ids
+
+def get_slice_grouping_parameters(slice_obj : Slice) -> Optional[Tuple[float, float]]:
+    isolation_levels : Set[int] = set()
+    availability : Optional[float] = None
+    capacity_gbps : Optional[float] = None
+
+    for constraint in slice_obj.slice_constraints:
+        kind = constraint.WhichOneof('constraint')
+        if kind == 'sla_isolation':
+            isolation_levels.update(constraint.sla_isolation.isolation_level)
+        elif kind == 'sla_capacity':
+            capacity_gbps = constraint.sla_capacity.capacity_gbps
+        elif kind == 'sla_availability':
+            availability = constraint.sla_availability.availability
+        else:
+            continue
+
+    no_isolation_level = len(isolation_levels) == 0
+    single_isolation_level = len(isolation_levels) == 1
+    has_no_isolation_level = NO_ISOLATION in isolation_levels
+    can_be_grouped = no_isolation_level or (single_isolation_level and has_no_isolation_level)
+    if not can_be_grouped: return None
+    if availability is None: return None
+    if capacity_gbps is None: return None
+    return availability, capacity_gbps
+
+def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool:
+    group_name, availability, capacity_gbps = selected_group
+    slice_uuid = slice_obj.slice_id.slice_uuid.uuid
+
+    context_client = ContextClient()
+    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    if slice_group_obj is None:
+        raise NotFoundException('Slice', group_name, extra_details='while adding to group')
+
+    for subslice_id in slice_group_obj.slice_subslice_ids:
+        if subslice_id == slice_obj.slice_id: break # already added
+    else:
+        slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)
+        # TODO: add other logic, such as re-configure parent slice
+
+    context_client.SetSlice(slice_group_obj)
+
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.export_point(
+        slice_uuid, group_name, availability, capacity_gbps, is_center=False)
+
+    return True
+
+def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool:
+    group_name, _, _ = selected_group
+    slice_uuid = slice_obj.slice_id.slice_uuid.uuid
+
+    context_client = ContextClient()
+    slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True)
+    if slice_group_obj is None:
+        raise NotFoundException('Slice', group_name, extra_details='while removing from group')
+
+    if slice_obj.slice_id in slice_group_obj.slice_subslice_ids:
+        slice_group_obj.slice_subslice_ids.remove(slice_obj.slice_id)
+        # TODO: other logic, such as deconfigure parent slice
+
+        tmp_slice_group_obj = Slice()
+        tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id) # pylint: disable=no-member
+        slice_subslice_id = tmp_slice_group_obj.slice_subslice_ids.add() # pylint: disable=no-member
+        slice_subslice_id.CopyFrom(slice_obj.slice_id)
+        context_client.UnsetSlice(tmp_slice_group_obj)
+
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.delete_point(slice_uuid)
+    return True
diff --git a/src/slice/service/slice_grouper/__init__.py b/src/slice/service/slice_grouper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/slice/service/slice_grouper/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/webui/grafana_db_slc_grps_psql.json b/src/webui/grafana_db_slc_grps_psql.json
new file mode 100644
index 0000000000000000000000000000000000000000..6aa7a478b6a19a83fa1677579163859eca6dd348
--- /dev/null
+++ b/src/webui/grafana_db_slc_grps_psql.json
@@ -0,0 +1,176 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "id": null,
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": {
+            "type": "grafana",
+            "uid": "-- Grafana --"
+          },
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "target": {
+            "limit": 100,
+            "matchAny": false,
+            "tags": [],
+            "type": "dashboard"
+          },
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "fiscalYearStartMonth": 0,
+    "graphTooltip": 0,
+    "links": [],
+    "liveNow": false,
+    "panels": [
+      {
+        "datasource": {
+          "type": "postgres",
+          "uid": "questdb-slc-grp"
+        },
+        "gridPos": {
+          "h": 21,
+          "w": 11,
+          "x": 0,
+          "y": 0
+        },
+        "id": 2,
+        "options": {
+          "ReferenceLines": [],
+          "border": {
+            "color": "yellow",
+            "size": 0
+          },
+          "fieldSets": [
+            {
+              "col": 6,
+              "color": "#C4162A",
+              "colorCol": 3,
+              "dotSize": 2,
+              "hidden": false,
+              "lineSize": 1,
+              "lineType": "none",
+              "polynomialOrder": 3,
+              "sizeCol": -7
+            },
+            {
+              "col": 5,
+              "color": "#edcd7d",
+              "colorCol": 3,
+              "dotSize": 2,
+              "hidden": false,
+              "lineSize": 1,
+              "lineType": "none",
+              "polynomialOrder": 3,
+              "sizeCol": -2
+            }
+          ],
+          "grid": {
+            "color": "gray"
+          },
+          "label": {
+            "col": -1,
+            "color": "#CCC",
+            "textSize": 2
+          },
+          "legend": {
+            "show": false,
+            "size": 0
+          },
+          "xAxis": {
+            "col": 4,
+            "inverted": false
+          },
+          "xAxisExtents": {
+            "min": 0,
+            "max": 100
+          },
+          "xAxisTitle": {
+            "text": "Availability %",
+            "color": "white",
+            "textSize": 2,
+            "rotated": false,
+            "logScale": false,
+            "fontSize": 4,
+            "fontColor": "white"
+          },
+          "xMargins": {
+            "lower": 30,
+            "upper": 10
+          },
+          "yAxisExtents": {
+            "min": 0,
+            "max": 100
+          },
+          "yAxisTitle": {
+            "text": "Capacity Gb/s",
+            "color": "#ccccdc",
+            "textSize": 2,
+            "rotated": true,
+            "logScale": false,
+            "fontSize": 4,
+            "fontColor": "white"
+          },
+          "yMargins": {
+            "lower": 20,
+            "upper": 20
+          }
+        },
+        "targets": [
+          {
+            "datasource": {
+              "type": "postgres",
+              "uid": "questdb-slc-grp"
+            },
+            "format": "table",
+            "group": [],
+            "hide": false,
+            "metricColumn": "none",
+            "rawQuery": true,
+            "rawSql": "SELECT timestamp as \"time\", slice_uuid, slice_group, slice_color, slice_availability, slice_capacity, slice_capacity_center, is_deleted\nFROM tfs_slice_groups\nWHERE $__timeFilter(timestamp) AND is_deleted <> 'true';",
+            "refId": "A",
+            "select": [
+              [
+                {
+                  "params": [
+                    "value"
+                  ],
+                  "type": "column"
+                }
+              ]
+            ],
+            "table": "tfs_slice_groups",
+            "timeColumn": "timestamp",
+            "where": []
+          }
+        ],
+        "title": "Slice Groups",
+        "transformations": [],
+        "type": "michaeldmoore-scatter-panel"
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 36,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": []
+    },
+    "time": {
+      "from": "now-30m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "Slice Grouping",
+    "uid": "tfs-slice-grps",
+    "version": 2,
+    "weekStart": ""
+  }
+}
diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e0020b04f33152de382f5b93af9735f8d737f92
--- /dev/null
+++ b/src/webui/service/load_gen/forms.py
@@ -0,0 +1,42 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from flask_wtf import FlaskForm
+from wtforms import BooleanField, FloatField, IntegerField, StringField, SubmitField
+from wtforms.validators import DataRequired, NumberRange
+
+class LoadGenForm(FlaskForm):
+    num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)])
+    num_generated = IntegerField('Num Generated', default=0, render_kw={'readonly': True})
+
+    request_type_service_l2nm = BooleanField('Service L2NM', default=False)
+    request_type_service_l3nm = BooleanField('Service L3NM', default=False)
+    request_type_service_mw = BooleanField('Service MW', default=False)
+    request_type_service_tapi = BooleanField('Service TAPI', default=False)
+    request_type_slice_l2nm = BooleanField('Slice L2NM', default=True)
+    request_type_slice_l3nm = BooleanField('Slice L3NM', default=False)
+
+    offered_load = FloatField('Offered Load [Erlang]', default=50, validators=[NumberRange(min=0.0)])
+    holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=0.0)])
+    inter_arrival_time = FloatField('Inter Arrival Time [seconds]', default=0, validators=[NumberRange(min=0.0)])
+
+    do_teardown = BooleanField('Do Teardown', default=True)
+
+    record_to_dlt = BooleanField('Record to DLT', default=False)
+    dlt_domain_id = StringField('DLT Domain Id', default='')
+
+    infinite_loop = BooleanField('Infinite Loop', default=False, render_kw={'disabled': True})
+    running = BooleanField('Running', default=False, render_kw={'disabled': True})
+
+    submit = SubmitField('Start/Stop')
diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py
index 3118b6de0e061adac65be178163623cd2d1d8fff..5f47f06b0ff59ad1383aab94caa41adc08440c87 100644
--- a/src/webui/service/load_gen/routes.py
+++ b/src/webui/service/load_gen/routes.py
@@ -12,34 +12,115 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from flask import render_template, Blueprint, flash
+from typing import Any, Optional
+from flask import redirect, render_template, Blueprint, flash, url_for
 from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
 from load_generator.client.LoadGeneratorClient import LoadGeneratorClient
+from .forms import LoadGenForm
 
 load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen')
 
-@load_gen.route('start', methods=['GET'])
-def start():
+def set_properties(field, data : Any, readonly : Optional[bool] = None, disabled : Optional[bool] = None) -> None:
+    if not hasattr(field, 'render_kw'):
+        field.render_kw = dict()
+    elif field.render_kw is None:
+        field.render_kw = dict()
+
+    if readonly is not None:
+        field.render_kw['readonly'] = readonly
+    if disabled is not None:
+        field.render_kw['disabled'] = disabled
+
+    if (readonly is not None and readonly) or (disabled is not None and disabled):
+        field.data = data
+
+@load_gen.route('home', methods=['GET'])
+def home():
     load_gen_client = LoadGeneratorClient()
-    try:
-        load_gen_client.connect()
-        load_gen_client.Start(Empty())
-        load_gen_client.close()
-        flash('Load Generator Started.', 'success')
-    except Exception as e: # pylint: disable=broad-except
-        flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger')
 
-    return render_template('main/debug.html')
+    load_gen_client.connect()
+    status = load_gen_client.GetStatus(Empty())
+    load_gen_client.close()
+
+    request_types = status.parameters.request_types
+    _request_type_service_l2nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM in request_types
+    _request_type_service_l3nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM in request_types
+    _request_type_service_mw   = RequestTypeEnum.REQUESTTYPE_SERVICE_MW   in request_types
+    _request_type_service_tapi = RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI in request_types
+    _request_type_slice_l2nm   = RequestTypeEnum.REQUESTTYPE_SLICE_L2NM   in request_types
+    _request_type_slice_l3nm   = RequestTypeEnum.REQUESTTYPE_SLICE_L3NM   in request_types
+
+    _offered_load       = round(status.parameters.offered_load       , ndigits=4)
+    _holding_time       = round(status.parameters.holding_time       , ndigits=4)
+    _inter_arrival_time = round(status.parameters.inter_arrival_time , ndigits=4)
+
+    form = LoadGenForm()
+    set_properties(form.num_requests             , status.parameters.num_requests , readonly=status.running)
+    set_properties(form.offered_load             , _offered_load                  , readonly=status.running)
+    set_properties(form.holding_time             , _holding_time                  , readonly=status.running)
+    set_properties(form.inter_arrival_time       , _inter_arrival_time            , readonly=status.running)
+    set_properties(form.do_teardown              , status.parameters.do_teardown  , disabled=status.running)
+    set_properties(form.record_to_dlt            , status.parameters.record_to_dlt, disabled=status.running)
+    set_properties(form.dlt_domain_id            , status.parameters.dlt_domain_id, readonly=status.running)
+    set_properties(form.request_type_service_l2nm, _request_type_service_l2nm     , disabled=status.running)
+    set_properties(form.request_type_service_l3nm, _request_type_service_l3nm     , disabled=status.running)
+    set_properties(form.request_type_service_mw  , _request_type_service_mw       , disabled=status.running)
+    set_properties(form.request_type_service_tapi, _request_type_service_tapi     , disabled=status.running)
+    set_properties(form.request_type_slice_l2nm  , _request_type_slice_l2nm       , disabled=status.running)
+    set_properties(form.request_type_slice_l3nm  , _request_type_slice_l3nm       , disabled=status.running)
+    set_properties(form.num_generated            , status.num_generated           , disabled=True)
+    set_properties(form.infinite_loop            , status.infinite_loop           , disabled=True)
+    set_properties(form.running                  , status.running                 , disabled=True)
 
-@load_gen.route('stop', methods=['GET'])
+    form.submit.label.text = 'Stop' if status.running else 'Start'
+    form_action = url_for('load_gen.stop') if status.running else url_for('load_gen.start')
+    return render_template('load_gen/home.html', form=form, form_action=form_action)
+
+@load_gen.route('start', methods=['POST'])
+def start():
+    form = LoadGenForm()
+    if form.validate_on_submit():
+        try:
+            load_gen_params = Parameters()
+            load_gen_params.num_requests       = form.num_requests.data
+            load_gen_params.offered_load       = form.offered_load.data
+            load_gen_params.holding_time       = form.holding_time.data
+            load_gen_params.inter_arrival_time = form.inter_arrival_time.data
+            load_gen_params.do_teardown        = form.do_teardown.data
+            load_gen_params.dry_mode           = False
+            load_gen_params.record_to_dlt      = form.record_to_dlt.data
+            load_gen_params.dlt_domain_id      = form.dlt_domain_id.data
+
+            del load_gen_params.request_types[:] # pylint: disable=no-member
+            request_types = list()
+            if form.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM)
+            if form.request_type_service_l3nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM)
+            if form.request_type_service_mw  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_MW  )
+            if form.request_type_service_tapi.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI)
+            if form.request_type_slice_l2nm  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L2NM  )
+            if form.request_type_slice_l3nm  .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L3NM  )
+            load_gen_params.request_types.extend(request_types) # pylint: disable=no-member
+
+            load_gen_client = LoadGeneratorClient()
+            load_gen_client.connect()
+            load_gen_client.Start(load_gen_params)
+            load_gen_client.close()
+            flash('Load Generator Started.', 'success')
+        except Exception as e: # pylint: disable=broad-except
+            flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger')
+    return redirect(url_for('load_gen.home'))
+
+@load_gen.route('stop', methods=['POST'])
 def stop():
-    load_gen_client = LoadGeneratorClient()
-    try:
-        load_gen_client.connect()
-        load_gen_client.Stop(Empty())
-        load_gen_client.close()
-        flash('Load Generator Stoped.', 'success')
-    except Exception as e: # pylint: disable=broad-except
-        flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger')
-
-    return render_template('main/debug.html')
+    form = LoadGenForm()
+    if form.validate_on_submit():
+        try:
+            load_gen_client = LoadGeneratorClient()
+            load_gen_client.connect()
+            load_gen_client.Stop(Empty())
+            load_gen_client.close()
+            flash('Load Generator Stopped.', 'success')
+        except Exception as e: # pylint: disable=broad-except
+            flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger')
+    return redirect(url_for('load_gen.home'))
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 35999ebe1785a033097dd30bfd672ce3b9a91a87..1dfa3687198d8a33db346ba2bbcd2989f6f109bb 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -86,10 +86,16 @@
                 <li class="nav-item">
                   <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
                 </li>
-  
                 <li class="nav-item">
                   <a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
                 </li>
+                <li class="nav-item">
+                  {% if '/load_gen/' in request.path %}
+                  <a class="nav-link active" aria-current="page" href="{{ url_for('load_gen.home') }}">Load Generator</a>
+                  {% else %}
+                  <a class="nav-link" href="{{ url_for('load_gen.home') }}">Load Generator</a>
+                  {% endif %}
+                </li>
   
                 <!-- <li class="nav-item">
                   <a class="nav-link" href="#">Context</a>
diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html
new file mode 100644
index 0000000000000000000000000000000000000000..d58f42601925ca438ab9d9f20b32f94960b5cada
--- /dev/null
+++ b/src/webui/service/templates/load_gen/home.html
@@ -0,0 +1,155 @@
+<!--
+ Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Load Generator</h1>
+    <br />
+
+    <form id="load_gen_form" method="POST" action="{{ form_action }}">
+        {{ form.hidden_tag() }}
+        <fieldset>
+            <div class="row mb-3">
+                {{ form.num_requests.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.num_requests.errors %}
+                        {{ form.num_requests(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.num_requests.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.num_requests(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.num_generated.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.num_generated.errors %}
+                        {{ form.num_generated(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.num_generated.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.num_generated(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-2 col-form-label">Service Types:</div>
+                <div class="col-sm-10">
+                    {{ form.request_type_slice_l2nm   }} {{ form.request_type_slice_l2nm  .label(class="col-sm-3 col-form-label") }}
+                    {{ form.request_type_slice_l3nm   }} {{ form.request_type_slice_l3nm  .label(class="col-sm-3 col-form-label") }}
+                    <br/>
+                    {{ form.request_type_service_l2nm }} {{ form.request_type_service_l2nm.label(class="col-sm-3 col-form-label") }}
+                    {{ form.request_type_service_l3nm }} {{ form.request_type_service_l3nm.label(class="col-sm-3 col-form-label") }}
+                    <br/>
+                    {{ form.request_type_service_mw   }} {{ form.request_type_service_mw  .label(class="col-sm-3 col-form-label") }}
+                    {{ form.request_type_service_tapi }} {{ form.request_type_service_tapi.label(class="col-sm-3 col-form-label") }}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.offered_load.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.offered_load.errors %}
+                        {{ form.offered_load(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.offered_load.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.offered_load(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.holding_time.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.holding_time.errors %}
+                        {{ form.holding_time(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.holding_time.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.holding_time(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.inter_arrival_time.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.inter_arrival_time.errors %}
+                        {{ form.inter_arrival_time(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.inter_arrival_time.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.inter_arrival_time(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-10">
+                    {{ form.do_teardown }} {{ form.do_teardown.label(class="col-sm-3 col-form-label") }}<br/>
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-2 col-form-label">DLT Settings:</div>
+                <div class="col-sm-10">
+                    {{ form.record_to_dlt }} {{ form.record_to_dlt.label(class="col-sm-3 col-form-label") }} <br/>
+                    {{ form.dlt_domain_id.label(class="col-sm-2 col-form-label") }}
+                    {% if form.dlt_domain_id.errors %}
+                        {{ form.dlt_domain_id(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.dlt_domain_id.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.dlt_domain_id(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                <div class="col-sm-2 col-form-label">Status:</div>
+                <div class="col-sm-10">
+                    {{ form.infinite_loop }} {{ form.infinite_loop.label(class="col-sm-3 col-form-label") }}
+                    {{ form.running }} {{ form.running.label(class="col-sm-3 col-form-label") }}
+                </div>
+            </div>
+            <br />
+
+            <div class="d-grid gap-2 d-md-flex justify-content-md-start">
+                {{ form.submit(class="btn btn-primary") }}
+            </div>
+        </fieldset>
+    </form>
+
+{% endblock %}
diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html
index 11a868fdff9f5ee1bcbf22936ae0283d4ccc5715..eef42ae9a9f4cf386d26da0449681bab75f33b41 100644
--- a/src/webui/service/templates/main/debug.html
+++ b/src/webui/service/templates/main/debug.html
@@ -17,26 +17,12 @@
 {% extends 'base.html' %}
 
 {% block content %}
-    <h1>Debug</h1>
+    <h1>Debug API</h1>
 
-    <!--
-        <h3>Dump ContextDB:</h3>
-        <ul>
-            <li>
-                <a class="nav-link" href="/context/api/dump/html" id="context_html_link" target="context_html">
-                    as HTML
-                </a>
-            </li>
-            <li>
-                <a class="nav-link" href="/context/api/dump/text" id="context_text_link" target="context_text">
-                    as Text
-                </a>
-            </li>
-        </ul>
-    -->
-
-    <h3>Load Generator:</h3>
-    <a href="{{ url_for('load_gen.start') }}" class="btn btn-primary" style="margin-bottom: 10px;">Start</a>
-    <a href="{{ url_for('load_gen.stop') }}" class="btn btn-primary" style="margin-bottom: 10px;">Stop</a>
+    <ul>
+        <li><a class="nav-link" href="/restconf/debug-api/contexts" id="contexts_link" target="contexts">Contexts</a></li>
+        <li><a class="nav-link" href="/restconf/debug-api/devices" id="devices_link" target="devices">Devices</a></li>
+        <li><a class="nav-link" href="/restconf/debug-api/links" id="links_link" target="links">Links</a></li>
+    </ul>
 
 {% endblock %}
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index b267f986c5e80af9f26b7beb060b284e2eb5b4d5..d99ede3e02c9716782317efc60fcc8d92e2e811a 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -157,6 +157,7 @@
             <td>SLA Availability</td>
             <td>-</td>
             <td>
+                {{ constraint.sla_availability.availability }} %;
                 {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                 {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
             </td>
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 2c1b55afb84bbe9d5dde92fe574cca094040b7af..6c8d15aed6fcf91580e9fa3bfe9f2f9a14e7666b 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -157,6 +157,7 @@
             <td>SLA Availability</td>
             <td>-</td>
             <td>
+                {{ constraint.sla_availability.availability }} %;
                 {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                 {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
             </td>