diff --git a/hackfest/mock_osm/__main__.py b/hackfest/mock_osm/__main__.py
index 669da2b5e6a1729f35d2958f2d7aa68c0413287d..4ed25eaedbf4eba1f04ea41c72a751ecd7d6380b 100644
--- a/hackfest/mock_osm/__main__.py
+++ b/hackfest/mock_osm/__main__.py
@@ -58,13 +58,11 @@ SERVICE_CONNECTION_POINTS = [
 class MockOSMShell(cmd.Cmd):
     intro = 'Welcome to the MockOSM shell.\nType help or ? to list commands.\n'
     prompt = '(mock-osm) '
-    file = None
 
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, **kwargs)
         self.mock_osm = MockOSM(WIM_URL, WIM_PORT_MAPPING, WIM_USERNAME, WIM_PASSWORD)
 
-    # ----- basic turtle commands -----
     def do_create(self, arg):
         'Create an ELINE (L2) service'
         service_uuid = self.mock_osm.create_connectivity_service(
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index e5757874b7e241d7c6b0bd050ac2aae47a1610e3..49e2b5943d20586941f80e8fc4b5c32c99d70f8e 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -38,7 +38,7 @@ spec:
         - name: LOG_LEVEL
           value: "INFO"
         - name: SLICE_GROUPING
-          value: "ENABLE"
+          value: "DISABLE"
         envFrom:
         - secretRef:
             name: qdb-data
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 1a8936ed4025586bf4de280b64cf2008b14c1a50..b89fa2207d1cd69e30612e8cecc8aa0f325e9dd3 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -113,7 +113,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
             location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id)
         num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()}
         num_disjoint_paths = min(num_endpoints_per_location)
-        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active)
+        update_constraint_sla_availability(constraints, num_disjoint_paths, all_active, 0.0)
 
     return target
 
diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py
index dd60441ca70329b9431188e28c21d98d941ada14..09723cc6f6b31e2496bf5ab475f50d0aa58f95c2 100644
--- a/src/context/service/database/ConfigRule.py
+++ b/src/context/service/database/ConfigRule.py
@@ -80,7 +80,7 @@ def compose_config_rules_data(
     return dict_config_rules
 
 def upsert_config_rules(
-    session : Session, config_rules : List[Dict],
+    session : Session, config_rules : List[Dict], is_delete : bool = False,
     device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None,
 ) -> bool:
     uuids_to_delete : Set[str] = set()
@@ -89,7 +89,9 @@ def upsert_config_rules(
     for config_rule in config_rules:
         configrule_uuid = config_rule['configrule_uuid']
         configrule_action = config_rule['action']
-        if configrule_action == ORM_ConfigActionEnum.SET:
+        if is_delete or configrule_action == ORM_ConfigActionEnum.DELETE:
+            uuids_to_delete.add(configrule_uuid)
+        elif configrule_action == ORM_ConfigActionEnum.SET:
             position = uuids_to_upsert.get(configrule_uuid)
             if position is None:
                 # if not added, add it
@@ -98,8 +100,6 @@ def upsert_config_rules(
             else:
                 # if already added, update occurrence
                 rules_to_upsert[position] = config_rule
-        elif configrule_action == ORM_ConfigActionEnum.DELETE:
-            uuids_to_delete.add(configrule_uuid)
         else:
             MSG = 'Action for ConfigRule({:s}) is not supported '+\
                   '(device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})'
diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py
index b37d0dcadd8799f8f7f9d538a135ed0404e08684..3a73f6589f9332aa4c84f8f296f2cb56db3048bf 100644
--- a/src/context/service/database/Constraint.py
+++ b/src/context/service/database/Constraint.py
@@ -81,7 +81,7 @@ def compose_constraints_data(
     return dict_constraints
 
 def upsert_constraints(
-    session : Session, constraints : List[Dict],
+    session : Session, constraints : List[Dict], is_delete : bool = False,
     service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
 ) -> bool:
     uuids_to_upsert : Dict[str, int] = dict()
@@ -111,7 +111,7 @@ def upsert_constraints(
         delete_affected = int(constraint_deletes.rowcount) > 0
 
     upsert_affected = False
-    if len(constraints) > 0:
+    if not is_delete and len(constraints) > 0:
         stmt = insert(ConstraintModel).values(constraints)
         stmt = stmt.on_conflict_do_update(
             index_elements=[ConstraintModel.constraint_uuid],
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
index a100103890f293d418b4c70a7948ad9687ffe5b3..e95cec4ae533795b23b8fd4e2f26ac9000c1bcce 100644
--- a/src/context/service/database/PolicyRule.py
+++ b/src/context/service/database/PolicyRule.py
@@ -65,7 +65,7 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
 
     policyrule_kind  = PolicyRuleKindEnum._member_map_.get(policyrule_kind.upper()) # pylint: disable=no-member
     policyrule_state = grpc_to_enum__policyrule_state(policyrule_basic.policyRuleState.policyRuleState)
-    policyrule_state_message = policyrule_basic.policyRuleState.policyRuleStateMessage
+    policyrule_state_msg = policyrule_basic.policyRuleState.policyRuleStateMessage
 
     json_policyrule_basic = grpc_message_to_json(policyrule_basic)
     policyrule_eca_data = json.dumps({
@@ -77,15 +77,15 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
     now = datetime.datetime.utcnow()
 
     policyrule_data = [{
-        'policyrule_uuid'         : policyrule_uuid,
-        'policyrule_kind'         : policyrule_kind,
-        'policyrule_state'        : policyrule_state,
-        'policyrule_state_message': policyrule_state_message,
-        'policyrule_priority'     : policyrule_basic.priority,
-        'policyrule_eca_data'     : policyrule_eca_data,
-        'created_at'              : now,
-        'updated_at'              : now,
-    }]
+        'policyrule_uuid'     : policyrule_uuid,
+        'policyrule_kind'     : policyrule_kind,
+        'policyrule_state'    : policyrule_state,
+        'policyrule_state_msg': policyrule_state_msg,
+        'policyrule_priority' : policyrule_basic.priority,
+        'policyrule_eca_data' : policyrule_eca_data,
+        'created_at'          : now,
+        'updated_at'          : now,
+    }]
 
     policyrule_service_uuid = None
     if policyrule_kind == PolicyRuleKindEnum.SERVICE:
@@ -108,11 +108,11 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule
         stmt = stmt.on_conflict_do_update(
             index_elements=[PolicyRuleModel.policyrule_uuid],
             set_=dict(
-                policyrule_state         = stmt.excluded.policyrule_state,
-                policyrule_state_message = stmt.excluded.policyrule_state_message,
-                policyrule_priority      = stmt.excluded.policyrule_priority,
-                policyrule_eca_data      = stmt.excluded.policyrule_eca_data,
-                updated_at               = stmt.excluded.updated_at,
+                policyrule_state     = stmt.excluded.policyrule_state,
+                policyrule_state_msg = stmt.excluded.policyrule_state_msg,
+                policyrule_priority  = stmt.excluded.policyrule_priority,
+                policyrule_eca_data  = stmt.excluded.policyrule_eca_data,
+                updated_at           = stmt.excluded.updated_at,
             )
         )
         stmt = stmt.returning(PolicyRuleModel.created_at, PolicyRuleModel.updated_at)
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
index 80af759defac88d69791b610b9bc093fef309db1..1d6781d53f7c85d8cb878b1b38b0de65b4ef5726 100644
--- a/src/context/service/database/Slice.py
+++ b/src/context/service/database/Slice.py
@@ -178,10 +178,6 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
     slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name
     context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=False)
 
-    if len(request.slice_constraints) > 0:         raise NotImplementedError('UnsetSlice: removal of constraints')
-    if len(request.slice_config.config_rules) > 0: raise NotImplementedError('UnsetSlice: removal of config rules')
-    #if len(request.slice_endpoint_ids) > 0:        raise NotImplementedError('UnsetSlice: removal of endpoints')
-
     slice_endpoint_uuids : Set[str] = set()
     for i,endpoint_id in enumerate(request.slice_endpoint_ids):
         endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
@@ -203,6 +199,10 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
         for subslice_id in request.slice_subslice_ids
     }
 
+    now = datetime.datetime.utcnow()
+    constraints = compose_constraints_data(request.slice_constraints, now, slice_uuid=slice_uuid)
+    config_rules = compose_config_rules_data(request.slice_config.config_rules, now, slice_uuid=slice_uuid)
+
     def callback(session : Session) -> bool:
         num_deletes = 0
         if len(slice_service_uuids) > 0:
@@ -223,7 +223,11 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
                     SliceEndPointModel.slice_uuid == slice_uuid,
                     SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids)
                 )).delete()
-        return num_deletes > 0
+
+        changed_constraints = upsert_constraints(session, constraints, is_delete=True, slice_uuid=slice_uuid)
+        changed_config_rules = upsert_config_rules(session, config_rules, is_delete=True, slice_uuid=slice_uuid)
+
+        return num_deletes > 0 or changed_constraints or changed_config_rules
 
     updated = run_transaction(sessionmaker(bind=db_engine), callback)
     return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated
diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py
index 363611105135661ccf3bd001c2e65ab75f9b6a6c..d7bb97cd0fec1037e98c8713b885b2d5141cae63 100644
--- a/src/context/service/database/models/ConfigRuleModel.py
+++ b/src/context/service/database/models/ConfigRuleModel.py
@@ -28,9 +28,9 @@ class ConfigRuleModel(_Base):
     __tablename__ = 'configrule'
 
     configrule_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True, index=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConfigRuleKindEnum), nullable=False)
     action          = Column(Enum(ORM_ConfigActionEnum), nullable=False)
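
For context on the recurring `index=True` additions in this and the following model files: a minimal sketch, assuming SQLAlchemy's default `ix_<table>_<column>` index naming, of the secondary index that flag produces when the metadata is created. The `parent`/`child` models below are hypothetical stand-ins, not part of this patch.
```
# Minimal sketch: index=True on a foreign-key column yields a secondary index
# (typically named ix_child_parent_uuid under SQLAlchemy's default naming convention).
from sqlalchemy import Column, ForeignKey, String, create_engine
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parent'
    parent_uuid = Column(String, primary_key=True)

class Child(Base):
    __tablename__ = 'child'
    child_uuid  = Column(String, primary_key=True)
    parent_uuid = Column(ForeignKey('parent.parent_uuid', ondelete='CASCADE'), nullable=True, index=True)

engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)  # logs: CREATE INDEX ix_child_parent_uuid ON child (parent_uuid)
```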
diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py
index c2b20de202cbeb065ffd50683d015729c76af9bc..156e33c6bb32e237af241035f1d9672b0b419222 100644
--- a/src/context/service/database/models/ConnectionModel.py
+++ b/src/context/service/database/models/ConnectionModel.py
@@ -25,7 +25,7 @@ class ConnectionModel(_Base):
     __tablename__ = 'connection'
 
     connection_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False, index=True)
     settings        = Column(String, nullable=False)
     created_at      = Column(DateTime, nullable=False)
     updated_at      = Column(DateTime, nullable=False)
@@ -56,7 +56,7 @@ class ConnectionEndPointModel(_Base):
     __tablename__ = 'connection_endpoint'
 
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid',     ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
     position        = Column(Integer, nullable=False)
 
     connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined')
@@ -70,7 +70,7 @@ class ConnectionSubServiceModel(_Base):
     __tablename__ = 'connection_subservice'
 
     connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
-    subservice_uuid = Column(ForeignKey('service.service_uuid',       ondelete='RESTRICT'), primary_key=True)
+    subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined')
     subservice = relationship('ServiceModel',    lazy='joined') # back_populates='connection_subservices'
diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py
index e9660d502c4420ec69c2bdc883d5e03ef283ca54..2412080c1a2883e7bed85e6e22f389270b3f73bc 100644
--- a/src/context/service/database/models/ConstraintModel.py
+++ b/src/context/service/database/models/ConstraintModel.py
@@ -35,8 +35,8 @@ class ConstraintModel(_Base):
     __tablename__ = 'constraint'
 
     constraint_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
-    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True, index=True)
     position        = Column(Integer, nullable=False)
     kind            = Column(Enum(ConstraintKindEnum), nullable=False)
     data            = Column(String, nullable=False)
diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py
index e591bc718711c6e0b8219eb60ce68c42f35a800c..12ba7e10e7c3d5789f9bf16ad7b4f50c35a36bf5 100644
--- a/src/context/service/database/models/EndPointModel.py
+++ b/src/context/service/database/models/EndPointModel.py
@@ -23,8 +23,8 @@ class EndPointModel(_Base):
     __tablename__ = 'endpoint'
 
     endpoint_uuid    = Column(UUID(as_uuid=False), primary_key=True)
-    device_uuid      = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), nullable=False)
-    topology_uuid    = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False)
+    device_uuid      = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), nullable=False, index=True)
+    topology_uuid    = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False, index=True)
     name             = Column(String, nullable=False)
     endpoint_type    = Column(String, nullable=False)
     kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1))
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
index 49c62d376624dc02b51a2b56860b04c322d66934..ee591f5c8404cd7f0f6c97651b5f731a51c43303 100644
--- a/src/context/service/database/models/LinkModel.py
+++ b/src/context/service/database/models/LinkModel.py
@@ -46,7 +46,7 @@ class LinkEndPointModel(_Base):
     __tablename__ = 'link_endpoint'
 
     link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     link     = relationship('LinkModel',     back_populates='link_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints'
diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py
index 4059991e1f1af7851d9fced17739b92675261227..2f0c8a326a57a05ab1fd623a968dea0bc39d9e76 100644
--- a/src/context/service/database/models/PolicyRuleModel.py
+++ b/src/context/service/database/models/PolicyRuleModel.py
@@ -28,15 +28,15 @@ class PolicyRuleKindEnum(enum.Enum):
 class PolicyRuleModel(_Base):
     __tablename__ = 'policyrule'
 
-    policyrule_uuid          = Column(UUID(as_uuid=False), primary_key=True)
-    policyrule_kind          = Column(Enum(PolicyRuleKindEnum), nullable=False)
-    policyrule_state         = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False)
-    policyrule_state_message = Column(String, nullable=False)
-    policyrule_priority      = Column(Integer, nullable=False)
-    policyrule_service_uuid  = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True)
-    policyrule_eca_data      = Column(String, nullable=False)
-    created_at               = Column(DateTime, nullable=False)
-    updated_at               = Column(DateTime, nullable=False)
+    policyrule_uuid         = Column(UUID(as_uuid=False), primary_key=True)
+    policyrule_kind         = Column(Enum(PolicyRuleKindEnum), nullable=False)
+    policyrule_state        = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False)
+    policyrule_state_msg    = Column(String, nullable=False)
+    policyrule_priority     = Column(Integer, nullable=False)
+    policyrule_service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True, index=True)
+    policyrule_eca_data     = Column(String, nullable=False)
+    created_at              = Column(DateTime, nullable=False)
+    updated_at              = Column(DateTime, nullable=False)
 
     policyrule_service = relationship('ServiceModel') # back_populates='policyrules'
     policyrule_devices = relationship('PolicyRuleDeviceModel' ) # back_populates='policyrule'
@@ -55,7 +55,7 @@ class PolicyRuleModel(_Base):
             'policyRuleId': self.dump_id(),
             'policyRuleState': {
                 'policyRuleState': self.policyrule_state.value,
-                'policyRuleStateMessage': self.policyrule_state_message,
+                'policyRuleStateMessage': self.policyrule_state_msg,
             },
             'priority': self.policyrule_priority,
         })
@@ -71,7 +71,7 @@ class PolicyRuleDeviceModel(_Base):
     __tablename__ = 'policyrule_device'
 
     policyrule_uuid = Column(ForeignKey('policyrule.policyrule_uuid', ondelete='RESTRICT'), primary_key=True)
-    device_uuid     = Column(ForeignKey('device.device_uuid',         ondelete='RESTRICT'), primary_key=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices'
     device     = relationship('DeviceModel',     lazy='joined') # back_populates='policyrule_devices'
diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py
index b581bf900a8861d9af199fef62bd218159b1e00e..09ff381b5eb374ea752590bba5403fe816319036 100644
--- a/src/context/service/database/models/ServiceModel.py
+++ b/src/context/service/database/models/ServiceModel.py
@@ -25,7 +25,7 @@ class ServiceModel(_Base):
     __tablename__ = 'service'
 
     service_uuid   = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid   = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid   = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     service_name   = Column(String, nullable=False)
     service_type   = Column(Enum(ORM_ServiceTypeEnum), nullable=False)
     service_status = Column(Enum(ORM_ServiceStatusEnum), nullable=False)
@@ -67,7 +67,7 @@ class ServiceEndPointModel(_Base):
     __tablename__ = 'service_endpoint'
 
     service_uuid  = Column(ForeignKey('service.service_uuid',   ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     service  = relationship('ServiceModel',  back_populates='service_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints'
diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py
index 458bc714a3be42ee619fd2d182d734a4edd79628..2d6c884169154fee8d44c26464416c6708c650b1 100644
--- a/src/context/service/database/models/SliceModel.py
+++ b/src/context/service/database/models/SliceModel.py
@@ -24,7 +24,7 @@ class SliceModel(_Base):
     __tablename__ = 'slice'
 
     slice_uuid         = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid       = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid       = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     slice_name         = Column(String, nullable=True)
     slice_status       = Column(Enum(ORM_SliceStatusEnum), nullable=False)
     slice_owner_uuid   = Column(String, nullable=True)
@@ -81,7 +81,7 @@ class SliceEndPointModel(_Base):
     __tablename__ = 'slice_endpoint'
 
     slice_uuid    = Column(ForeignKey('slice.slice_uuid',       ondelete='CASCADE' ), primary_key=True)
-    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     slice    = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined')
     endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints'
@@ -90,7 +90,7 @@ class SliceServiceModel(_Base):
     __tablename__ = 'slice_service'
 
     slice_uuid   = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE' ), primary_key=True)
-    service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True)
+    service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
 
     slice   = relationship('SliceModel', back_populates='slice_services', lazy='joined')
     service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services'
@@ -98,8 +98,8 @@ class SliceServiceModel(_Base):
 class SliceSubSliceModel(_Base):
     __tablename__ = 'slice_subslice'
 
-    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True)
-    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True)
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
+    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True)
 
     slice    = relationship(
         'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
index 92802e5b2ddb4ed57342bbd244255b73b11c6cce..7dc2333f0a9b979f251c173d850a235dcb822d91 100644
--- a/src/context/service/database/models/TopologyModel.py
+++ b/src/context/service/database/models/TopologyModel.py
@@ -22,7 +22,7 @@ class TopologyModel(_Base):
     __tablename__ = 'topology'
 
     topology_uuid = Column(UUID(as_uuid=False), primary_key=True)
-    context_uuid  = Column(ForeignKey('context.context_uuid'), nullable=False)
+    context_uuid  = Column(ForeignKey('context.context_uuid'), nullable=False, index=True)
     topology_name = Column(String, nullable=False)
     created_at    = Column(DateTime, nullable=False)
     updated_at    = Column(DateTime, nullable=False)
@@ -56,8 +56,8 @@ class TopologyModel(_Base):
 class TopologyDeviceModel(_Base):
     __tablename__ = 'topology_device'
 
-    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
-    device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True)
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True, index=True)
 
     #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices'
     device   = relationship('DeviceModel',   lazy='joined') # back_populates='topology_devices'
@@ -65,8 +65,8 @@ class TopologyDeviceModel(_Base):
 class TopologyLinkModel(_Base):
     __tablename__ = 'topology_link'
 
-    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
-    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True)
+    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True, index=True)
 
     #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links'
     link     = relationship('LinkModel',     lazy='joined') # back_populates='topology_links'
diff --git a/src/context/service/database/models/_Base.py b/src/context/service/database/models/_Base.py
index 4323fb7130462b13958627216c62f1fe4edc91c7..a10de60eb8731132ec815de1ff897c06ac12b665 100644
--- a/src/context/service/database/models/_Base.py
+++ b/src/context/service/database/models/_Base.py
@@ -13,10 +13,60 @@
 # limitations under the License.
 
 import sqlalchemy
-from sqlalchemy.orm import declarative_base
+from typing import Any, List
+from sqlalchemy.orm import Session, sessionmaker, declarative_base
+from sqlalchemy.sql import text
+from sqlalchemy_cockroachdb import run_transaction
 
 _Base = declarative_base()
 
+def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
+    def index_storing(
+        index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str]
+    ) -> Any:
+        str_index_fields = ','.join(['"{:s}"'.format(index_field) for index_field in index_fields])
+        str_storing_fields = ','.join(['"{:s}"'.format(storing_field) for storing_field in storing_fields])
+        INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'
+        return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields))
+
+    statements = [
+        index_storing('configrule_device_uuid_rec_idx', 'configrule', ['device_uuid'], [
+            'service_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('configrule_service_uuid_rec_idx', 'configrule', ['service_uuid'], [
+            'device_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('configrule_slice_uuid_rec_idx', 'configrule', ['slice_uuid'], [
+            'device_uuid', 'service_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('connection_service_uuid_rec_idx', 'connection', ['service_uuid'], [
+            'settings', 'created_at', 'updated_at'
+        ]),
+        index_storing('constraint_service_uuid_rec_idx', 'constraint', ['service_uuid'], [
+            'slice_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('constraint_slice_uuid_rec_idx', 'constraint', ['slice_uuid'], [
+            'service_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at'
+        ]),
+        index_storing('endpoint_device_uuid_rec_idx', 'endpoint', ['device_uuid'], [
+            'topology_uuid', 'name', 'endpoint_type', 'kpi_sample_types', 'created_at', 'updated_at'
+        ]),
+        index_storing('service_context_uuid_rec_idx', 'service', ['context_uuid'], [
+            'service_name', 'service_type', 'service_status', 'created_at', 'updated_at'
+        ]),
+        index_storing('slice_context_uuid_rec_idx', 'slice', ['context_uuid'], [
+            'slice_name', 'slice_status', 'slice_owner_uuid', 'slice_owner_string', 'created_at', 'updated_at'
+        ]),
+
+        index_storing('topology_context_uuid_rec_idx', 'topology', ['context_uuid'], [
+            'topology_name', 'created_at', 'updated_at'
+        ]),
+    ]
+    def callback(session : Session) -> bool:
+        for stmt in statements: session.execute(stmt)
+    run_transaction(sessionmaker(bind=db_engine), callback)
+
 def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False):
     if drop_if_exists: _Base.metadata.drop_all(db_engine)
     _Base.metadata.create_all(db_engine)
+    create_performance_enhancers(db_engine)
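
A quick illustration (not part of the patch) of the SQL that the `index_storing` helper above composes for the first entry of `statements`; the snippet below simply re-applies the same format string:
```
# Illustration: the statement composed by index_storing('configrule_device_uuid_rec_idx', ...)
index_fields   = ['device_uuid']
storing_fields = ['service_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at']
str_index_fields   = ','.join('"{:s}"'.format(index_field) for index_field in index_fields)
str_storing_fields = ','.join('"{:s}"'.format(storing_field) for storing_field in storing_fields)
INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'
print(INDEX_STORING.format('configrule_device_uuid_rec_idx', 'configrule', str_index_fields, str_storing_fields))
# Output (wrapped here for readability; printed as a single line):
# CREATE INDEX IF NOT EXISTS configrule_device_uuid_rec_idx ON "configrule" ("device_uuid")
#   STORING ("service_uuid","slice_uuid","position","kind","action","data","created_at","updated_at");
```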
diff --git a/src/device/service/drivers/xr/README_XR.md b/src/device/service/drivers/xr/README_XR.md
index c741c3e808ebddd20c9c4749064964594ea32b73..fa1bc944035d27769cd9c16e0c29318e554e9489 100644
--- a/src/device/service/drivers/xr/README_XR.md
+++ b/src/device/service/drivers/xr/README_XR.md
@@ -146,6 +146,18 @@ arbitrary endpoints in the topology (with consequent underlying XR service insta
     PYTHONPATH=../../../../ ./service-cli.py list
     PYTHONPATH=../../../../ ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f
 ```
+
+It is also possible to create XR services directly, without the multi-layer services on top. E.g.:
+```
+    PYTHONPATH=../../../../  ./service-cli.py create-xr FooService X1-XR-CONSTELLATION  "XR HUB 1|XR-T1" "XR LEAF 2|XR-T1"
+```
+
+Additionally, it is possible to list the available endpoints and to delete services:
+```
+    PYTHONPATH=../../../../  ./service-cli.py list-endpoints
+    PYTHONPATH=../../../../  ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f
+```
+
 The PYTHONPATH is mandatory. Suitable topology JSON must have been loaded before. With the
 CockroachDB persistence, it is sufficient to load the topology once and it will persist.
 
diff --git a/src/device/service/drivers/xr/service-cli.py b/src/device/service/drivers/xr/service-cli.py
index 01bd2aaa118225cf74a953fff81b54abb857e39b..7ab9606cef7bd7d3cca4f414cbd704ab150c8f52 100755
--- a/src/device/service/drivers/xr/service-cli.py
+++ b/src/device/service/drivers/xr/service-cli.py
@@ -19,21 +19,33 @@
 #
 # Run in this directory with PYTHONPATH=../../../../
 # E.g.:
+#   Create multi-layer service (L2 VPN over XR):
 #     PYTHONPATH=../../../../  ./service-cli.py create 1 R1-EMU 13/1/2 500 2 R3-EMU 13/1/2 500
+#   Single-layer (XR without services on top of it):
+#     PYTHONPATH=../../../../  ./service-cli.py create-xr FooService X1-XR-CONSTELLATION  "XR HUB 1|XR-T1" "XR LEAF 2|XR-T1"
+#   List services:
 #     PYTHONPATH=../../../../  ./service-cli.py list
+#   List possible endpoints:
+#     PYTHONPATH=../../../../  ./service-cli.py list-endpoints
+#   Delete a service (if multi-layer, always delete the highest layer!):
 #     PYTHONPATH=../../../../  ./service-cli.py delete 43a8046a-5dec-463d-82f7-7cc3442dbf4f
 
-
 import argparse
 import logging
-import traceback
+from copy import deepcopy
+from dataclasses import dataclass, field
+from typing import Dict
 from contextlib import contextmanager
 
 from common.Settings import get_setting
 from context.client.ContextClient import ContextClient
+from service.client.ServiceClient import ServiceClient
 from tests.tools.mock_osm.MockOSM import MockOSM
-from common.proto.context_pb2 import ContextId, ServiceTypeEnum, ServiceStatusEnum
+from common.proto.context_pb2 import ContextId, ServiceTypeEnum, ServiceStatusEnum, Service, Empty, ServiceId
 from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Topology import json_topology_id
+from common.tools.object_factory.ConfigRule import json_config_rule_set
 
 LOGGER = logging.getLogger(__name__)
 
@@ -48,11 +60,52 @@ def make_context_client():
     finally:
         _client.close()
 
+@contextmanager
+def make_service_client():
+    try:
+        _client = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC'))
+        yield _client
+    finally:
+        _client.close()
+
 def make_osm_wim():
     wim_url = 'http://{:s}:{:s}'.format(
         get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
     return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
 
+@dataclass
+class DevInfo:
+    name: str
+    uuid: str
+    endpoints: Dict[str, str] = field(default_factory=dict)
+    endpoints_by_uuid: Dict[str, str] = field(default_factory=dict)
+
+    def get_endpoint_uuid_or_exit(self, ep_name: str) -> str:
+        if ep_name not in self.endpoints:
+            print(f"Endpoint {ep_name} does not exist in device {self.name}. See \"service-cli.py list-endpoints\"")
+            exit(-1)
+        return self.endpoints[ep_name]
+
+def get_devices(cc: ContextClient) -> Dict[str, DevInfo]:
+    r = cc.ListDevices(Empty())
+    # print(grpc_message_to_json_string(r))
+
+    devices = dict()
+    for dev in r.devices:
+        di = DevInfo(dev.name, dev.device_id.device_uuid.uuid)
+        for ep in dev.device_endpoints:
+            di.endpoints[ep.name] = ep.endpoint_id.endpoint_uuid.uuid
+            di.endpoints_by_uuid[ep.endpoint_id.endpoint_uuid.uuid] = ep.name
+        devices[dev.name] = di
+    return devices
+
+def get_endpoint_map(devices: Dict[str, DevInfo]):
+    ep_map = dict()
+    for dev in devices.values():
+        for ep_name, ep_uuid in dev.endpoints.items():
+            ep_map[ep_uuid] = (dev.name, ep_name)
+    return ep_map
+
 logging.basicConfig(level=logging.ERROR)
 
 parser = argparse.ArgumentParser(description='TF Service Management Utility')
@@ -74,6 +127,13 @@ delete_parser = subparsers.add_parser('delete')
 delete_parser.add_argument('service_uuid', type=str, help='UUID of the service to be deleted')
 
 list_parser = subparsers.add_parser('list')
+list_parser = subparsers.add_parser('list-endpoints')
+
+create_xr_parser = subparsers.add_parser('create-xr')
+create_xr_parser.add_argument('service_name', type=str, help='Service Name')
+create_xr_parser.add_argument('constellation', type=str, help='XR Constellation')
+create_xr_parser.add_argument('interface1', type=str, help='One endpoint of the service')
+create_xr_parser.add_argument('interface2', type=str, help='Second endpoint of the service')
 
 args = parser.parse_args()
 
@@ -103,12 +163,17 @@ else:
     WIM_SERVICE_CONNECTION_POINTS = []
 
 #print(str(args))
-print(f"=== WIM_SERVICE_TYPE: {WIM_SERVICE_TYPE}")
-print(f"=== WIM_SERVICE_CONNECTION_POINTS: {WIM_SERVICE_CONNECTION_POINTS}")
-print(f"=== WIM_MAPPING: {WIM_MAPPING}")
+#print(f"=== WIM_SERVICE_TYPE: {WIM_SERVICE_TYPE}")
+#print(f"=== WIM_SERVICE_CONNECTION_POINTS: {WIM_SERVICE_CONNECTION_POINTS}")
+#print(f"=== WIM_MAPPING: {WIM_MAPPING}")
 
 with make_context_client() as client:
-    osm_wim = make_osm_wim();
+    # We only permit one context in our demos/testing setups
+    response = client.ListContextIds(Empty())
+    assert len(response.context_ids) == 1
+    context_uuid = json_context_id(response.context_ids[0].context_uuid.uuid)
+
+    osm_wim = make_osm_wim()
 
     if args.command == "create":
         service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
@@ -117,28 +182,122 @@ with make_context_client() as client:
         print(f"*** Get created service status --> {str(status)}")
 
     elif args.command == "delete":
-        osm_wim.wim.check_credentials()
+        service_id = {
+            "context_id": context_uuid,
+            "service_uuid": {
+                "uuid": args.service_uuid
+            }
+        }
+
         try:
-            osm_wim.wim.delete_connectivity_service(args.service_uuid)
-            print(f"*** Service {args.service_uuid} is no longer present (delete was successfull or service did not exist)")
-        except Exception as e:
-            print(f"*** Failed to delete service {args.service_uuid}, {e}")
+            response = client.GetService(ServiceId(**service_id))
+            #print(grpc_message_to_json_string(response))
+
+            high_level_delete = response.service_type == ServiceTypeEnum.SERVICETYPE_L2NM or response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+            print(f"Deleting service {response.name}, type {ServiceTypeEnum.Name(response.service_type)}, {high_level_delete=}")
+
+        except:
+            print(f"No service with uuid {args.service_uuid} ({service_id})")
+            exit(-1)
+
+        if high_level_delete:
+            osm_wim.wim.check_credentials()
+            try:
+                osm_wim.wim.delete_connectivity_service(args.service_uuid)
+                print(f"*** Service {args.service_uuid} deleted (L2SM/L3SM layer)")
+            except Exception as e:
+                print(f"*** Failed to delete service {args.service_uuid}, {e}")
+        else:
+            with make_service_client() as service_client:
+                try:
+                    service_client.DeleteService(ServiceId(**service_id))
+                    print(f"*** Service {args.service_uuid} deleted (low level)")
+                except Exception as e:
+                    print(f"*** Failed to delete service {args.service_uuid}, {e}")
+
+    elif args.command == "create-xr":
+        CONTEXT_NAME = 'admin'
+        CONTEXT_ID   = json_context_id(CONTEXT_NAME)
+        CONTEXT      = json_context(CONTEXT_NAME, name=CONTEXT_NAME)
+
+        json_tapi_settings = {
+            'capacity_value'  : 50.0,
+            'capacity_unit'   : 'GHz',
+            'layer_proto_name': 'PHOTONIC_MEDIA',
+            'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
+            'direction'       : 'UNIDIRECTIONAL',
+        }
+        config_rule = json_config_rule_set('/settings', json_tapi_settings)
+
+        devices = get_devices(client)
+        if args.constellation not in devices:
+            print(f"Constellation {args.constellation} does not exist as a device. See \"service-cli.py list-endpoints\"")
+            exit(-1)
+        else:
+            dev_info = devices[args.constellation]
+            constellation_uuid = dev_info.uuid
+
+        interface1_uuid = dev_info.get_endpoint_uuid_or_exit(args.interface1)
+        interface2_uuid = dev_info.get_endpoint_uuid_or_exit(args.interface2)
+
+        print(f"Constellation {args.constellation:40}: {constellation_uuid:36}")
+        print(f"Interface 1   {args.interface1:40}: {interface1_uuid:36}")
+        print(f"Interface 2   {args.interface2:40}: {interface2_uuid:36}")
+
+        service_request = {
+            "name": args.service_name,
+            "service_id": {
+                 "context_id": {"context_uuid": {"uuid": response.context_ids[0].context_uuid.uuid}},
+                 "service_uuid": {"uuid": args.service_name}
+            },
+            'service_type'        : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
+            "service_endpoint_ids": [
+                {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': interface1_uuid}, 'topology_id': json_topology_id("admin", context_id=context_uuid)},
+                {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': interface2_uuid}, 'topology_id': json_topology_id("admin", context_id=context_uuid)}
+            ],
+            'service_status'      : {'service_status': ServiceStatusEnum.SERVICESTATUS_PLANNED},
+            'service_constraints' : [],
+        }
+
+        with make_service_client() as service_client:
+            sr = deepcopy(service_request)
+            endpoints, sr['service_endpoint_ids'] = sr['service_endpoint_ids'], []
+            create_response = service_client.CreateService(Service(**sr))
+            print(f'CreateService: {grpc_message_to_json_string(create_response)}')
+
+            sr['service_endpoint_ids'] = endpoints
+            #sr['service_id']['service_uuid'] = create_response
+            sr['service_config'] = {'config_rules': [config_rule]}
+
+            update_response = service_client.UpdateService(Service(**sr))
+            print(f'UpdateService: {grpc_message_to_json_string(update_response)}')
+
     elif args.command == "list":
+        devices = get_devices(client)
+        ep_map = get_endpoint_map(devices)
+
         response = client.ListServices(ContextId(**CONTEXT_ID))
 
-        #print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+        # print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
         for service in response.services:
             scs = ""
-            
-            # See if there are endpoint constraints that might be regognizable by the user.
-            # Keys do not necessarily exist, so catch exceptions and ignore those constraints
-            # that we cannot easily represent.
-            for sc in service.service_constraints:
-                try:
-                    scs += f"{sc.endpoint_location.endpoint_id.device_id.device_uuid.uuid}:{sc.endpoint_location.endpoint_id.endpoint_uuid.uuid} "
-                except Exception:
-                    pass
 
-            print(f"{service.service_id.service_uuid.uuid:36}  {ServiceTypeEnum.Name(service.service_type):40}  {ServiceStatusEnum.Name(service.service_status.service_status)}  {scs}")
+            ep_list = []
+            for ep in service.service_endpoint_ids:
+                ep_uuid = ep.endpoint_uuid.uuid
+                if ep_uuid in ep_map:
+                    dev_name, ep_name = ep_map[ep_uuid]
+                    ep_list.append(f"{dev_name}:{ep_name}")
+            ep_list.sort()
+            eps = ", ".join(ep_list)
 
+            #print(f"{service.service_id.service_uuid.uuid:36}  {ServiceTypeEnum.Name(service.service_type):40}  {service.name:40}  {ServiceStatusEnum.Name(service.service_status.service_status)}  {scs}")
+            print(f"{service.service_id.service_uuid.uuid:36}  {ServiceTypeEnum.Name(service.service_type):40}  {service.name:40}  {ServiceStatusEnum.Name(service.service_status.service_status):28}  {eps}")
 
+    elif args.command == "list-endpoints":
+        devices = get_devices(client)
+        for name in sorted(devices.keys()):
+            dev = devices[name]
+            print(f"{name:40}    {dev.uuid:36}")
+            for ep_name in sorted(dev.endpoints.keys()):
+                print(f"    {ep_name:40}    {dev.endpoints[ep_name]:36}")
diff --git a/src/device/service/drivers/xr/setup_test_env.sh b/src/device/service/drivers/xr/setup_test_env.sh
index 92ff4a0312fb8f963f934f4cfd8d18603675aed0..bd5463cd4f9d08c903fc601cfcb7241b672e7681 100755
--- a/src/device/service/drivers/xr/setup_test_env.sh
+++ b/src/device/service/drivers/xr/setup_test_env.sh
@@ -17,7 +17,11 @@ export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get service/contextservice --namesp
 export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service/contextservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}')
 export COMPUTESERVICE_SERVICE_HOST=$(kubectl get service/computeservice --namespace tfs  --template '{{.spec.clusterIP}}')
 export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service/computeservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
+export SERVICESERVICE_SERVICE_HOST=$(kubectl get service/serviceservice --namespace tfs  --template '{{.spec.clusterIP}}')
+export SERVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service/serviceservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}')
 echo "CONTEXTSERVICE_SERVICE_HOST=$CONTEXTSERVICE_SERVICE_HOST"
 echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=$CONTEXTSERVICE_SERVICE_PORT_GRPC"
 echo "COMPUTESERVICE_SERVICE_HOST=$COMPUTESERVICE_SERVICE_HOST"
 echo "COMPUTESERVICE_SERVICE_PORT_HTTP=$COMPUTESERVICE_SERVICE_PORT_HTTP"
+echo "SERVICESERVICE_SERVICE_HOST=$SERVICESERVICE_SERVICE_HOST"
+echo "SERVICESERVICE_SERVICE_PORT_GRPC=$SERVICESERVICE_SERVICE_PORT_GRPC"
diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
index b71dd9a35329e2aef6ce64739f59103a656b4de3..9ae3cdc1216891ca4dfcf01c1bd49d27bf4ef6f6 100644
--- a/src/load_generator/load_gen/Constants.py
+++ b/src/load_generator/load_gen/Constants.py
@@ -26,3 +26,5 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT': 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT',
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT'  : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
+
+MAX_WORKER_THREADS = 10
\ No newline at end of file
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index a6d14307eee9bbc531e09495d4b650e361aa3d26..e94dc0cb948d703f71925fd932e749ebb544650e 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -230,9 +230,9 @@ class RequestGenerator:
         ]
 
         if request_type == RequestType.SERVICE_L2NM:
-            availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-            capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-            e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -275,9 +275,9 @@ class RequestGenerator:
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
         elif request_type == RequestType.SERVICE_L3NM:
-            availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-            capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-            e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -380,9 +380,9 @@ class RequestGenerator:
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
 
-        availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-        capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-        e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+        availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
+        capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+        e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
         constraints = [
             json_constraint_sla_availability(1, True, availability),
             json_constraint_sla_capacity(capacity_gbps),
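
A small worked comparison (illustration only) of the replaced arithmetic: the old expression truncated the random SLA values to two decimals, while the new `round()` calls round to the requested precision (five decimals for availability, two for capacity and latency):
```
# Illustration of the old truncation vs. the new rounding for a sample value.
value = 99.98765

old_style = int(value * 100.0) / 100.0   # truncates to 2 decimals -> 99.98
new_style = round(value, ndigits=2)      # rounds to 2 decimals    -> 99.99

print(old_style, new_style)              # 99.98 99.99
```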
diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
index 57afe80bec569b29d2931256a8c1cf7a1ab3eb85..773a37eac258f8b3c16c966464ced124d3c77c85 100644
--- a/src/load_generator/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -21,6 +21,7 @@ from typing import Dict, Optional
 from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
+from .Constants import MAX_WORKER_THREADS
 from .DltTools import explore_entities_to_record, record_entities
 from .Parameters import Parameters
 from .RequestGenerator import RequestGenerator
@@ -37,7 +38,7 @@ class RequestScheduler:
         self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=10)},
+            executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
             job_defaults = {
                 'coalesce': False,
                 'max_instances': 100,
diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py
index f55527e4756022fc4941605f54ab82b74c0937f0..fbc554aa261cbc68009258d322aa01d52bfe760d 100644
--- a/src/service/service/task_scheduler/TaskScheduler.py
+++ b/src/service/service/task_scheduler/TaskScheduler.py
@@ -130,7 +130,7 @@ class TasksScheduler:
                 self._dag.add(connection_key, service_key_done)
 
         t1 = time.time()
-        LOGGER.info('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
+        LOGGER.debug('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
 
     def compose_from_service(self, service : Service, is_delete : bool = False) -> None:
         t0 = time.time()
@@ -196,11 +196,11 @@ class TasksScheduler:
                 raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item)))
 
         t1 = time.time()
-        LOGGER.info('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0))
+        LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0))
 
     def execute_all(self, dry_run : bool = False) -> None:
         ordered_task_keys = list(self._dag.static_order())
-        LOGGER.info('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))
+        LOGGER.debug('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))
 
         results = []
         for task_key in ordered_task_keys:
@@ -208,5 +208,5 @@ class TasksScheduler:
             succeeded = True if dry_run else task.execute()
             results.append(succeeded)
 
-        LOGGER.info('[execute_all] results={:s}'.format(str(results)))
+        LOGGER.debug('[execute_all] results={:s}'.format(str(results)))
         return zip(ordered_task_keys, results)
diff --git a/src/slice/service/README.md b/src/slice/service/README.md
deleted file mode 100644
index 696b4a6e099cfc8463db6f93e5940cbc1d9c32e1..0000000000000000000000000000000000000000
--- a/src/slice/service/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# SLICE GROUPING details
-
-## Description
-- Similar slice requests can share underlying services.
-- Clustering algorithm for slice grouping.
-- Consider both paths and SLA constraints.
-- SLA monitored by slice group.
-
-## TFS Target Objective
-- Objective 3.2: Provisioning of multi-tenant transport network slices.
-- Improve network resource usage by 30% by adopting multi-tenancy resource allocation algorithms.
-- Optimal slice grouping: trade-offs between economies of scale and limitations as to which SLAs can be grouped together need to be considered.
-- Optimal grouping of slices is required to maximise KPIs, such as resource utilisation, utility of the connectivity, and energy efficiency.
-- In this context, trade-offs between the resulting control plane complexity and differential treatment of SLA classes should be considered.
-
-## New Requirements
-- User can select if slice grouping is performed per-slice request.
-- Slice grouping introduces a clustering algorithm for finding service optimisation while preserving slice SLA.
-- Service (re-)optimisation is provided.
-
-## TFS Architecture Update
-- Update Slice service RPC to include Slice Grouping.
-- Use novel Slice model with SLA constraints.
-- Use Policy Component with action to update services to apply slice grouping.
-- Describe Slice service operation modes: per-request or user-triggered.
-
-    OSS/BSS --> Slice   : Create Slice with SLA (slice)
-    Slice   --> Slice   : Slice Grouping (slice)
-alt [slice can be grouped to other slice services]
-    // do nothing and return existing slice
-else [slice needs new services]
-    Slice   --> ... : normal logic
-end alt
-    Slice   --> OSS/BSS : slice
-
-slice.proto:
-  rpc OrderSliceWithSLA(context.Slice) returns (context.SliceId) {} // If slice with SLA already exists, returns slice. If not, it creates it.
-  rpc RunSliceGrouping (context.Empty) returns (context.Empty) {} // Optimizes the underlying services and re-maps them to the requested slices.
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index 717127a0048a5bb20a0f1689268f7029f7cf0438..acec3ae303266714ae7f50c5c0d78fc41d350ea1 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -64,7 +64,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             # unable to identify the kind of slice; just update endpoints, constraints and config rules
             # update the slice in database, and return
             # pylint: disable=no-member
-            return context_client.SetSlice(slice_rw)
+            reply = context_client.SetSlice(slice_rw)
+            context_client.close()
+            return reply
 
         slice_with_uuids = context_client.GetSlice(slice_id_with_uuids)
 
@@ -82,10 +84,12 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             slice_active.CopyFrom(slice_)
             slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
             context_client.SetSlice(slice_active)
+            interdomain_client.close()
+            context_client.close()
             return slice_id
 
         if self._slice_grouper.is_enabled:
-            grouped = self._slice_grouper.group(slice_with_uuids)
+            grouped = self._slice_grouper.group(slice_with_uuids) # pylint: disable=unused-variable
 
         # Local domain slice
         service_id = ServiceId()
@@ -159,6 +163,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         slice_active.CopyFrom(slice_)
         slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
         context_client.SetSlice(slice_active)
+
+        service_client.close()
+        context_client.close()
         return slice_id
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
@@ -195,6 +202,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         try:
             _slice = context_client.GetSlice(request)
         except: # pylint: disable=bare-except
+            context_client.close()
             return Empty()
 
         if is_multi_domain(context_client, _slice.slice_endpoint_ids):
@@ -208,7 +216,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             context_client.SetSlice(current_slice)
 
             if self._slice_grouper.is_enabled:
-                ungrouped = self._slice_grouper.ungroup(current_slice)
+                ungrouped = self._slice_grouper.ungroup(current_slice) # pylint: disable=unused-variable
 
             service_client = ServiceClient()
             for service_id in _slice.slice_service_ids:
@@ -219,6 +227,8 @@ class SliceServiceServicerImpl(SliceServiceServicer):
                 context_client.UnsetSlice(current_slice)
 
                 service_client.DeleteService(service_id)
+            service_client.close()
 
         context_client.RemoveSlice(request)
+        context_client.close()
         return Empty()
diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py
index 12337cf8ee02656439e6c4284358c995afe1078a..ca957f3c7760eb65b649d22ecb5b57dee3e08dab 100644
--- a/src/slice/service/slice_grouper/Tools.py
+++ b/src/slice/service/slice_grouper/Tools.py
@@ -115,11 +115,25 @@ def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, flo
     if slice_group_obj is None:
         raise NotFoundException('Slice', group_name, extra_details='while adding to group')
 
-    for subslice_id in slice_group_obj.slice_subslice_ids:
-        if subslice_id == slice_obj.slice_id: break # already added
-    else:
-        slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)
-        # TODO: add other logic, such as re-configure parent slice
+    del slice_group_obj.slice_endpoint_ids[:]
+    for endpoint_id in slice_obj.slice_endpoint_ids:
+        slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id)
+
+    del slice_group_obj.slice_constraints[:]
+    del slice_group_obj.slice_service_ids[:]
+
+    del slice_group_obj.slice_subslice_ids[:]
+    slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)
+
+    del slice_group_obj.slice_config.config_rules[:]
+    for config_rule in slice_obj.slice_config.config_rules:
+        group_config_rule = slice_group_obj.slice_config.config_rules.add()
+        group_config_rule.CopyFrom(config_rule)
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        TEMPLATE = '/subslice[{:s}]{:s}'
+        slice_resource_key = config_rule.custom.resource_key
+        group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key)
+        group_config_rule.custom.resource_key = group_resource_key
 
     context_client.SetSlice(slice_group_obj)
 
@@ -139,13 +153,23 @@ def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float
         raise NotFoundException('Slice', group_name, extra_details='while removing from group')
 
     if slice_obj.slice_id in slice_group_obj.slice_subslice_ids:
-        slice_group_obj.slice_subslice_ids.remove(slice_obj.slice_id)
-        # TODO: other logic, such as deconfigure parent slice
-
         tmp_slice_group_obj = Slice()
-        tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id) # pylint: disable=no-member
-        slice_subslice_id = tmp_slice_group_obj.slice_subslice_ids.add() # pylint: disable=no-member
-        slice_subslice_id.CopyFrom(slice_obj.slice_id)
+        tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id)             # pylint: disable=no-member
+
+        tmp_slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id)   # pylint: disable=no-member
+
+        for endpoint_id in slice_obj.slice_endpoint_ids:
+            tmp_slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id)      # pylint: disable=no-member
+
+        for config_rule in slice_obj.slice_config.config_rules:
+            group_config_rule = tmp_slice_group_obj.slice_config.config_rules.add() # pylint: disable=no-member
+            group_config_rule.CopyFrom(config_rule)
+            if group_config_rule.WhichOneof('config_rule') != 'custom': continue
+            TEMPLATE = '/subslice[{:s}]{:s}'
+            slice_resource_key = group_config_rule.custom.resource_key
+            group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key)
+            group_config_rule.custom.resource_key = group_resource_key
+
         context_client.UnsetSlice(tmp_slice_group_obj)
 
     metrics_exporter = MetricsExporter()
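A minimal standalone sketch of the resource-key namespacing applied in both hunks above; the helper name and the sample key are illustrative only:

```python
# Mirrors the TEMPLATE-based rewriting in add_slice_to_group()/remove_slice_from_group():
# each config rule copied from a subslice into its group slice gets its resource_key
# prefixed with the subslice UUID, so rules coming from different subslices cannot
# collide inside the group slice.
def namespace_resource_key(slice_uuid: str, resource_key: str) -> str:
    return '/subslice[{:s}]{:s}'.format(slice_uuid, resource_key)

# Made-up values, for illustration:
assert namespace_resource_key('3fd9-uuid', '/settings') == '/subslice[3fd9-uuid]/settings'
```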
diff --git a/src/slice/tests/old/Main.py b/src/slice/tests/old/Main.py
new file mode 100644
index 0000000000000000000000000000000000000000..0924f1c646e9722bf23354d0787786375663e85f
--- /dev/null
+++ b/src/slice/tests/old/Main.py
@@ -0,0 +1,98 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, pandas, random, sys, time
+#from matplotlib import pyplot as plt
+from sklearn.cluster import KMeans
+from typing import Dict, List, Tuple
+
+os.environ['METRICSDB_HOSTNAME' ] = '127.0.0.1' #'questdb-public.qdb.svc.cluster.local'
+os.environ['METRICSDB_ILP_PORT' ] = '9009'
+os.environ['METRICSDB_REST_PORT'] = '9000'
+
+from .MetricsExporter import MetricsExporter # pylint: disable=wrong-import-position
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER : logging.Logger = logging.getLogger(__name__)
+
+def get_random_slices(count : int) -> List[Tuple[str, float, float]]:
+    slices = list()
+    for i in range(count):
+        slice_name          = 'slice-{:03d}'.format(i)
+        slice_availability  = random.uniform(00.0, 99.99)
+        slice_capacity_gbps = random.uniform(0.1, 100.0)
+        slices.append((slice_name, slice_availability, slice_capacity_gbps))
+    return slices
+
+def init_kmeans() -> Tuple[KMeans, Dict[str, Dict]]:
+    groups = [
+        # Name, avail[0..100], bw_gbps[0..100]
+        ('bronze',   10.0,  10.0), # ('silver',   25.0,  25.0),
+        ('silver',   30.0,  40.0), # ('silver',   25.0,  25.0),
+        ('gold',     70.0,  50.0), # ('gold',     90.0,  50.0),
+        ('platinum', 99.0, 100.0),
+    ]
+    df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity'])
+
+    num_clusters = len(groups)
+    k_means = KMeans(n_clusters=num_clusters)
+    k_means.fit(df_groups[['availability', 'capacity']])
+
+    df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']])
+    mapping = {
+        group['name']:{k:v for k,v in group.items() if k != 'name'}
+        for group in list(df_groups.to_dict('records'))
+    }
+
+    return k_means, mapping
+
+def main():
+    LOGGER.info('Starting...')
+    metrics_exporter = MetricsExporter()
+    metrics_exporter.create_table()
+
+    k_means, mapping = init_kmeans()
+    label_to_group = {}
+    for group_name,group_attrs in mapping.items():
+        label = group_attrs['label']
+        availability = group_attrs['availability']
+        capacity = group_attrs['capacity']
+        metrics_exporter.export_point(group_name, group_name, availability, capacity, is_center=True)
+        label_to_group[label] = group_name
+
+    slices = get_random_slices(10000)
+    for slice_ in slices:
+        sample = pandas.DataFrame([slice_[1:3]], columns=['availability', 'capacity'])
+        sample['label'] = k_means.predict(sample)
+        sample = sample.to_dict('records')[0]
+        label = sample['label']
+        availability = sample['availability']
+        capacity = sample['capacity']
+        group_name = label_to_group[label]
+        metrics_exporter.export_point(slice_[0], group_name, availability, capacity, is_center=False)
+        time.sleep(0.01)
+
+    #df_silver   = df_slices[df_slices['group']==mapping['silver']]
+    #df_gold     = df_slices[df_slices['group']==mapping['gold']]
+    #df_platinum = df_slices[df_slices['group']==mapping['platinum']]
+    #plt.scatter(df_silver.availability,         df_silver.capacity,             s=25,  c='black' )
+    #plt.scatter(df_gold.availability,           df_gold.capacity,               s=25,  c='gold'  )
+    #plt.scatter(df_platinum.availability,       df_platinum.capacity,           s=25,  c='silver')
+    #plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red'   )
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
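A note on the clustering setup in Main.py above: since n_clusters equals the number of seed groups, each seed becomes its own centroid, so k_means.predict() reduces to nearest-centroid assignment of a slice onto the closest (availability, capacity) template. A self-contained sketch of that mapping, with illustrative sample values:

```python
import pandas
from sklearn.cluster import KMeans

# Seed group templates, as in init_kmeans() above.
groups = [('bronze', 10.0, 10.0), ('silver', 30.0, 40.0), ('gold', 70.0, 50.0), ('platinum', 99.0, 100.0)]
df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity'])
k_means = KMeans(n_clusters=len(groups)).fit(df_groups[['availability', 'capacity']])
labels = k_means.predict(df_groups[['availability', 'capacity']])
label_to_group = {int(label): name for name, label in zip(df_groups['name'], labels)}

# With one centroid per seed, a new slice is simply assigned to the nearest template:
sample = pandas.DataFrame([(87.3, 42.1)], columns=['availability', 'capacity'])
print(label_to_group[int(k_means.predict(sample)[0])])  # -> 'gold'
```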
diff --git a/src/slice/tests/old/MetricsExporter.py b/src/slice/tests/old/MetricsExporter.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c04cb9fcb1c7ab05c5274fb8e2a934a39b4cfdd
--- /dev/null
+++ b/src/slice/tests/old/MetricsExporter.py
@@ -0,0 +1,116 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, os, requests
+from typing import Any, Literal, Union
+from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module
+
+LOGGER = logging.getLogger(__name__)
+
+MAX_RETRIES = 10
+DELAY_RETRIES = 0.5
+
+MSG_EXPORT_EXECUTED   = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed'
+MSG_EXPORT_FAILED     = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...'
+MSG_REST_BAD_STATUS   = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}'
+MSG_REST_EXECUTED     = '[rest_request] Query({:s}) executed, result: {:s}'
+MSG_REST_FAILED       = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...'
+MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}'
+
+METRICSDB_HOSTNAME  = os.environ.get('METRICSDB_HOSTNAME')
+METRICSDB_ILP_PORT  = int(os.environ.get('METRICSDB_ILP_PORT'))
+METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT'))
+METRICSDB_TABLE_SLICE_GROUPS = 'slice_groups'
+
+COLORS = {
+    'platinum': '#E5E4E2',
+    'gold'    : '#FFD700',
+    'silver'  : '#808080',
+    'bronze'  : '#CD7F32',
+}
+DEFAULT_COLOR = '#000000' # black
+
+class MetricsExporter():
+    def __init__(self) -> None:
+        pass
+
+    def create_table(self) -> None:
+        sql_query = ' '.join([
+            'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)),
+            ','.join([
+                'timestamp TIMESTAMP',
+                'slice_uuid SYMBOL',
+                'slice_group SYMBOL',
+                'slice_color SYMBOL',
+                'slice_availability DOUBLE',
+                'slice_capacity_center DOUBLE',
+                'slice_capacity DOUBLE',
+            ]),
+            ') TIMESTAMP(timestamp);'
+        ])
+        try:
+            result = self.rest_request(sql_query)
+            if not result: raise Exception
+            LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS)))
+        except Exception as e:
+            LOGGER.warning('Table {:s} cannot be created. {:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e)))
+            raise
+
+    def export_point(
+        self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float,
+        is_center : bool = False
+    ) -> None:
+        dt_timestamp = datetime.datetime.utcnow()
+        slice_color = COLORS.get(slice_group, DEFAULT_COLOR)
+        symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color)
+        columns = dict(slice_availability=slice_availability)
+        columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity
+
+        for retry in range(MAX_RETRIES):
+            try:
+                with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender:
+                    sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp)
+                    sender.flush()
+                LOGGER.info(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns)))
+                return
+            except (Exception, IngressError): # pylint: disable=broad-except
+                LOGGER.exception(MSG_EXPORT_FAILED.format(
+                    str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
+
+    def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]:
+        url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT)
+        params = {'query': rest_query, 'fmt': 'json'}
+
+        for retry in range(MAX_RETRIES):
+            try:
+                response = requests.get(url, params=params)
+                status_code = response.status_code
+                if status_code not in {200}:
+                    str_content = response.content.decode('UTF-8')
+                    raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content))
+
+                json_response = response.json()
+                if 'ddl' in json_response:
+                    LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl'])))
+                    return True
+                elif 'dataset' in json_response:
+                    LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset'])))
+                    return json_response['dataset']
+
+            except Exception: # pylint: disable=broad-except
+                LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES))
+
+        raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES))
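A minimal usage sketch of the exporter defined above; the environment values are the same placeholders Main.py uses, and the exact import path depends on how the old tests are executed:

```python
import os
# Must be set before MetricsExporter is imported (its module-level os.environ.get calls).
os.environ.setdefault('METRICSDB_HOSTNAME', '127.0.0.1')
os.environ.setdefault('METRICSDB_ILP_PORT', '9009')
os.environ.setdefault('METRICSDB_REST_PORT', '9000')

from MetricsExporter import MetricsExporter  # or `from .MetricsExporter import ...` as in Main.py

exporter = MetricsExporter()
exporter.create_table()                                              # CREATE TABLE IF NOT EXISTS slice_groups ...
exporter.export_point('gold', 'gold', 90.0, 50.0, is_center=True)    # a cluster center
exporter.export_point('slice-001', 'gold', 87.3, 42.1)               # one slice sample
```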
diff --git a/src/slice/tests/old/test_kmeans.py b/src/slice/tests/old/test_kmeans.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f54621c57c3bfcc1741591e5d0a87781e640420
--- /dev/null
+++ b/src/slice/tests/old/test_kmeans.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import pandas, random, sys
+from matplotlib import pyplot as plt
+from sklearn.cluster import KMeans
+from typing import Dict, List, Tuple
+
+def get_random_slices(count : int) -> List[Tuple[str, float, float]]:
+    slices = list()
+    for i in range(count):
+        slice_name          = 'slice-{:03d}'.format(i)
+        slice_availability  = random.uniform(00.0, 99.99)
+        slice_capacity_gbps = random.uniform(0.1, 100.0)
+        slices.append((slice_name, slice_availability, slice_capacity_gbps))
+    return slices
+
+def init_kmeans() -> Tuple[KMeans, Dict[str, int]]:
+    groups = [
+        # Name, avail[0..100], bw_gbps[0..100]
+        ('silver',   25.0,  50.0), # ('silver',   25.0,  25.0),
+        ('gold',     90.0,  10.0), # ('gold',     90.0,  50.0),
+        ('platinum', 99.0, 100.0),
+    ]
+    df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity'])
+
+    num_clusters = len(groups)
+    k_means = KMeans(n_clusters=num_clusters)
+    k_means.fit(df_groups[['availability', 'capacity']])
+
+    df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']])
+    mapping = {group['name']:group['label'] for group in list(df_groups.to_dict('records'))}
+
+    return k_means, mapping
+
+def main():
+    k_means, mapping = init_kmeans()
+    slices = get_random_slices(500)
+    df_slices = pandas.DataFrame(slices, columns=['slice_uuid', 'availability', 'capacity'])
+
+    # predict one
+    #sample = df_slices[['availability', 'capacity']].iloc[[0]]
+    #y_predicted = k_means.predict(sample)
+    #y_predicted
+
+    df_slices['group'] = k_means.predict(df_slices[['availability', 'capacity']])
+
+    df_silver   = df_slices[df_slices['group']==mapping['silver']]
+    df_gold     = df_slices[df_slices['group']==mapping['gold']]
+    df_platinum = df_slices[df_slices['group']==mapping['platinum']]
+
+    plt.scatter(df_silver.availability,         df_silver.capacity,             s=25,  c='black' )
+    plt.scatter(df_gold.availability,           df_gold.capacity,               s=25,  c='gold'  )
+    plt.scatter(df_platinum.availability,       df_platinum.capacity,           s=25,  c='silver')
+    plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red'   )
+    plt.xlabel('service-slo-availability')
+    plt.ylabel('service-slo-one-way-bandwidth')
+    #ax = plt.subplot(1, 1, 1)
+    #ax.set_ylim(bottom=0., top=1.)
+    #ax.set_xlim(left=0.)
+    plt.savefig('slice_grouping.png')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/slice/tests/old/test_subslices.py b/src/slice/tests/old/test_subslices.py
new file mode 100644
index 0000000000000000000000000000000000000000..39ee235df0e9d263244fa14436f609397bcea84f
--- /dev/null
+++ b/src/slice/tests/old/test_subslices.py
@@ -0,0 +1,96 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sqlalchemy, sys
+from sqlalchemy import Column, ForeignKey, String, event, insert
+from sqlalchemy.orm import Session, declarative_base, relationship
+from typing import Dict
+
+def _fk_pragma_on_connect(dbapi_con, con_record):
+    dbapi_con.execute('pragma foreign_keys=ON')
+
+_Base = declarative_base()
+
+class SliceModel(_Base):
+    __tablename__ = 'slice'
+
+    slice_uuid = Column(String, primary_key=True)
+
+    slice_subslices = relationship(
+        'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid')
+
+    def dump_id(self) -> Dict:
+        return {'uuid': self.slice_uuid}
+
+    def dump(self) -> Dict:
+        return {
+            'slice_id': self.dump_id(),
+            'slice_subslice_ids': [
+                slice_subslice.subslice.dump_id()
+                for slice_subslice in self.slice_subslices
+            ]
+        }
+
+class SliceSubSliceModel(_Base):
+    __tablename__ = 'slice_subslice'
+
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
+    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    slice    = relationship('SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined')
+    subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined')
+
+def main():
+    engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=False, future=True)
+    event.listen(engine, 'connect', _fk_pragma_on_connect)
+
+    _Base.metadata.create_all(engine)
+
+    slice_data = [
+        {'slice_uuid': 'slice-01'},
+        {'slice_uuid': 'slice-01-01'},
+        {'slice_uuid': 'slice-01-02'},
+    ]
+
+    slice_subslices_data = [
+        {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-01'},
+        {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-02'},
+    ]
+
+    # insert
+    with engine.connect() as conn:
+        conn.execute(insert(SliceModel).values(slice_data))
+        conn.execute(insert(SliceSubSliceModel).values(slice_subslices_data))
+        conn.commit()
+
+    # read
+    with Session(engine) as session:
+        obj_list = session.query(SliceModel).all()
+        print([obj.dump() for obj in obj_list])
+        session.commit()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
+# Expected output when running this module:
+# [
+#     {'slice_id': {'uuid': 'slice-01'}, 'slice_subslice_ids': [
+#         {'uuid': 'slice-01-01'},
+#         {'uuid': 'slice-01-02'}
+#     ]},
+#     {'slice_id': {'uuid': 'slice-01-01'}, 'slice_subslice_ids': []},
+#     {'slice_id': {'uuid': 'slice-01-02'}, 'slice_subslice_ids': []}
+# ]
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index ef5253b876085c152fa7a71ffb5a29cfd1f90516..fca1071419b3b2b61739c2a0d1d8bfa45aba5119 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -98,6 +98,7 @@ def create_app(use_config=None, web_app_root=None):
     app.jinja_env.globals.update({              # pylint: disable=no-member
         'enumerate'           : enumerate,
         'json_to_list'        : json_to_list,
+        'round'               : round,
         'get_working_context' : get_working_context,
         'get_working_topology': get_working_topology,
     })
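Design note on the hunk above: exposing Python's round as a Jinja global lets the detail templates below call round(value, ndigits=...) directly; Jinja2's built-in round filter ({{ value | round(2) }}) would be an equivalent alternative that does not require extending the globals.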
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index d99ede3e02c9716782317efc60fcc8d92e2e811a..bee2e93c53896a8eeac826703a60afe02a5aa825 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -141,7 +141,7 @@
             <td>SLA Capacity</td>
             <td>-</td>
             <td>
-                {{ constraint.sla_capacity.capacity_gbps }} Gbps
+                {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps
             </td>
         </tr>
         {% elif constraint.WhichOneof('constraint')=='sla_latency' %}
@@ -149,7 +149,7 @@
             <td>SLA E2E Latency</td>
             <td>-</td>
             <td>
-                {{ constraint.sla_latency.e2e_latency_ms }} ms
+                {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms
             </td>
         </tr>
         {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
@@ -157,7 +157,7 @@
             <td>SLA Availability</td>
             <td>-</td>
             <td>
-                {{ constraint.sla_availability.availability }} %;
+                {{ round(constraint.sla_availability.availability, ndigits=5) }} %;
                 {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                 {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
             </td>
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 6c8d15aed6fcf91580e9fa3bfe9f2f9a14e7666b..8f223e44deda37b177a360a51b1e366f680fac27 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -141,7 +141,7 @@
             <td>SLA Capacity</td>
             <td>-</td>
             <td>
-                {{ constraint.sla_capacity.capacity_gbps }} Gbps
+                {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps
             </td>
         </tr>
         {% elif constraint.WhichOneof('constraint')=='sla_latency' %}
@@ -149,7 +149,7 @@
             <td>SLA E2E Latency</td>
             <td>-</td>
             <td>
-                {{ constraint.sla_latency.e2e_latency_ms }} ms
+                {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms
             </td>
         </tr>
         {% elif constraint.WhichOneof('constraint')=='sla_availability' %}
@@ -157,7 +157,7 @@
             <td>SLA Availability</td>
             <td>-</td>
             <td>
-                {{ constraint.sla_availability.availability }} %;
+                {{ round(constraint.sla_availability.availability, ndigits=5) }} %;
                 {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
                 {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
             </td>