diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 61f5b1d21e0e6305449020fccb139a0dfe21b046..e7e5c1604a8b971424ff5f7e5bf292c4b263cbfe 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -36,7 +36,7 @@ spec:
         - containerPort: 9192
         env:
         - name: LOG_LEVEL
-          value: "DEBUG"
+          value: "INFO"
         - name: SLICE_GROUPING
           value: "DISABLE"
         envFrom:
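
The manifest change above only lowers the default verbosity from DEBUG to INFO; the slice service still takes its level from the LOG_LEVEL environment variable at startup. As a rough illustration (the exact TeraFlowSDN settings helper is not part of this diff, so the plain os.environ lookup and the INFO fallback below are assumptions):

    # Hypothetical sketch: map the LOG_LEVEL value from the manifest onto the
    # standard logging module. The real service may use its own settings helper.
    import logging, os

    log_level = os.environ.get('LOG_LEVEL', 'INFO').upper()
    logging.basicConfig(level=getattr(logging, log_level, logging.INFO))
    logging.getLogger(__name__).info('Logging configured at %s', log_level)
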
diff --git a/src/device/service/drivers/openconfig/templates/Inventory.py b/src/device/service/drivers/openconfig/templates/Inventory.py
index e45958538cc28c91125146c3b27e7acef6c9aed9..65562bc5bd21a6d50d28bdf754f1e60015930920 100644
--- a/src/device/service/drivers/openconfig/templates/Inventory.py
+++ b/src/device/service/drivers/openconfig/templates/Inventory.py
@@ -15,7 +15,7 @@
 import logging, lxml.etree as ET
 from typing import Any, Dict, List, Tuple
 from .Namespace import NAMESPACES
-from .Tools import add_value_from_tag, add_int_from_tag
+from .Tools import add_value_from_tag
 
 LOGGER = logging.getLogger(__name__)
 
@@ -55,8 +55,6 @@ XPATH_PORTS = "//ocp:components/ocp:component"
 def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
     response = []
     parent_types = {}
-    #Initialized count to 0 for index
-    count = 0
     for xml_component in xml_data.xpath(XPATH_PORTS, namespaces=NAMESPACES):
         LOGGER.info('xml_component inventario = {:s}'.format(str(ET.tostring(xml_component))))
         inventory = {}
@@ -65,7 +63,6 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         inventory['class'] = ''
         inventory['attributes'] = {}
         component_reference = []
-        
 
         component_name = xml_component.find('ocp:name', namespaces=NAMESPACES)
         if component_name is None or component_name.text is None: continue
@@ -85,34 +82,6 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
             add_value_from_tag(inventory, 'class', component_type)
         
         if inventory['class'] == 'CPU' or inventory['class'] == 'STORAGE': continue
-
-        ##Added (after the checking of the name and the class)
-        #Physical index- Index of the component in the array
-    
-        add_int_from_tag(inventory['attributes'], 'physical-index', count)
-        count +=1
-
-        ##Added
-        #FRU
-        if inventory['class'] == 'FRU':
-            component_isfru = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES)
-            add_value_from_tag(inventory['attributes'], 'isfru', component_isfru)
-        ##ID
-        component_id = xml_component.find('ocp:state/ocp:id', namespaces=NAMESPACES)
-        if not component_id is None:
-            add_value_from_tag(inventory['attributes'], 'id', component_id)
-
-        ##OPER_STATUS
-        component_oper_status = xml_component.find('ocp:state/ocp:oper-status', namespaces=NAMESPACES)
-        if not component_oper_status is None:
-            add_value_from_tag(inventory['attributes'], 'oper-status', component_oper_status)
-
-        ##MODEL_ID  
-        component_model_id = xml_component.find('ocp:state/ocp:entity-id', namespaces=NAMESPACES)
-        if not component_model_id is None:
-            add_value_from_tag(inventory['attributes'], 'model-id', component_model_id)
-    
-    ##
         
         component_empty = xml_component.find('ocp:state/ocp:empty', namespaces=NAMESPACES)
         if not component_empty is None:
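
For context on the block removed above: parse() walks every ocp:component returned by the XPath query and copies selected child elements into an attributes dictionary through add_value_from_tag; the deleted lines had additionally injected a synthetic physical-index counter and a few extra state fields. A minimal, self-contained sketch of that find-then-copy pattern follows; the OpenConfig namespace URI and the body of add_value_from_tag are assumptions here, since only the helper's tail appears in this diff.

    # Sketch of the find-then-copy pattern used in Inventory.parse(), on a toy
    # document. The namespace URI and the element-to-text handling inside
    # add_value_from_tag are assumed for illustration.
    import lxml.etree as ET

    NAMESPACES = {'ocp': 'http://openconfig.net/yang/platform'}  # assumed URI

    def add_value_from_tag(target, field_name, field_value, cast=None):
        if field_value is None or field_value.text is None: return
        value = field_value.text
        if cast is not None: value = cast(value)
        target[field_name] = value

    xml_data = ET.fromstring(
        '<components xmlns="http://openconfig.net/yang/platform">'
        '<component><name>chassis-1</name>'
        '<state><description>Chassis</description></state></component>'
        '</components>')

    inventory = {'attributes': {}}
    for component in xml_data.xpath('//ocp:component', namespaces=NAMESPACES):
        add_value_from_tag(inventory, 'name', component.find('ocp:name', namespaces=NAMESPACES))
        add_value_from_tag(inventory['attributes'], 'description',
                           component.find('ocp:state/ocp:description', namespaces=NAMESPACES))
    print(inventory)  # {'attributes': {'description': 'Chassis'}, 'name': 'chassis-1'}
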
diff --git a/src/device/service/drivers/openconfig/templates/Tools.py b/src/device/service/drivers/openconfig/templates/Tools.py
index 78e61e0ae6651a0ed92cfca2fd4ee0c66499f496..79bebef5179b3464c33ce7fa0663b0cd35a51fc0 100644
--- a/src/device/service/drivers/openconfig/templates/Tools.py
+++ b/src/device/service/drivers/openconfig/templates/Tools.py
@@ -26,11 +26,6 @@ def add_value_from_tag(target : Dict, field_name: str, field_value : ET.Element,
     if cast is not None: field_value = cast(field_value)
     target[field_name] = field_value
 
-def add_int_from_tag(target : Dict, field_name: str, field_value : int, cast=None) -> None:
-    if field_value is None: return
-    if cast is not None: field_value = cast(field_value)
-    target[field_name] = field_value
-
 def add_value_from_collection(target : Dict, field_name: str, field_value : Collection) -> None:
     if field_value is None or len(field_value) == 0: return
     target[field_name] = field_value
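
The helper deleted above only wrapped a None check around a plain assignment; unlike add_value_from_tag, its field_value was already an int rather than an XML element, so its only visible call site could have assigned the value directly. A sketch of the equivalent inline form, with a hypothetical counter standing in for the removed physical-index logic:

    # Inline equivalent of the removed add_int_from_tag helper: a plain int needs
    # no Element-to-text extraction, so a direct dictionary assignment suffices.
    attributes = {}
    physical_index = 0                      # hypothetical running counter
    if physical_index is not None:
        attributes['physical-index'] = physical_index
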
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index 52552a6ed3b753b963a49bbe9b6ba70c47fab138..8a834f352ff3e562c068fb0ce1df5c90117238a6 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-#agrupar slices agrupar recursos para no hacer mas configs
-
 from typing import Optional
 import grpc, json, logging #, deepdiff
 from common.proto.context_pb2 import (
@@ -32,14 +30,13 @@ from interdomain.client.InterdomainClient import InterdomainClient
 from service.client.ServiceClient import ServiceClient
 from .slice_grouper.SliceGrouper import SliceGrouper
 
-LOGGER = logging.getLogger(__name__) #crea un objeto de registro con el nombre del modulo actual
-
+LOGGER = logging.getLogger(__name__)
 METRICS_POOL = MetricsPool('Slice', 'RPC')
 
-class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio gRPC definido por SliceServiceServicer 
+class SliceServiceServicerImpl(SliceServiceServicer):
     def __init__(self):
         LOGGER.debug('Creating Servicer...')
-        self._slice_grouper = SliceGrouper() #crea una instancia de slicegrouper
+        self._slice_grouper = SliceGrouper()
         LOGGER.debug('Servicer Created')
 
     def create_update(self, request : Slice) -> SliceId:
@@ -48,9 +45,9 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
         context_client = ContextClient()
-        slice_ro : Optional[Slice] = get_slice_by_id(context_client, request.slice_id, rw_copy=False) # se obtiene la slice con el sliceId de la req
+        slice_ro : Optional[Slice] = get_slice_by_id(context_client, request.slice_id, rw_copy=False)
 
-        slice_rw = Slice() #crea nueva slice desde la slice de la req
+        slice_rw = Slice()
         slice_rw.CopyFrom(request if slice_ro is None else slice_ro)
-        if len(request.name) > 0: slice_rw.name = request.name #actualizamos el nombre y estado de la slice rw
+        if len(request.name) > 0: slice_rw.name = request.name
         slice_rw.slice_owner.CopyFrom(request.slice_owner)                          # pylint: disable=no-member
         slice_rw.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED    # pylint: disable=no-member
 
@@ -59,7 +56,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
         copy_constraints (request.slice_constraints,         slice_rw.slice_constraints        ) # pylint: disable=no-member
         copy_config_rules(request.slice_config.config_rules, slice_rw.slice_config.config_rules) # pylint: disable=no-member
 
-        slice_id_with_uuids = context_client.SetSlice(slice_rw) #actualizar o crear la slice en la db
+        slice_id_with_uuids = context_client.SetSlice(slice_rw)
 
         if len(slice_rw.slice_endpoint_ids) < 2: # pylint: disable=no-member
             # unable to identify the kind of slice; just update endpoints, constraints and config rules
@@ -68,18 +65,17 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
             reply = context_client.SetSlice(slice_rw)
             context_client.close()
             return reply
-        #si tiene menos de 2 endpoints se omite la actualizacion y se retorna el sliceid
 
-        slice_with_uuids = context_client.GetSlice(slice_id_with_uuids) #obtenemos la slice actualizada
+        slice_with_uuids = context_client.GetSlice(slice_id_with_uuids)
 
         #LOGGER.info('json_current_slice = {:s}'.format(str(json_current_slice)))
         #json_updated_slice = grpc_message_to_json(request)
         #LOGGER.info('json_updated_slice = {:s}'.format(str(json_updated_slice)))
         #changes = deepdiff.DeepDiff(json_current_slice, json_updated_slice)
         #LOGGER.info('changes = {:s}'.format(str(changes)))
 
-        if is_inter_domain(context_client, slice_with_uuids.slice_endpoint_ids): #si la slice es interdominio
-            interdomain_client = InterdomainClient() #que es interdomain client?
+        if is_inter_domain(context_client, slice_with_uuids.slice_endpoint_ids):
+            interdomain_client = InterdomainClient()
             slice_id = interdomain_client.RequestSlice(slice_with_uuids)
             slice_ = context_client.GetSlice(slice_id)
             slice_active = Slice()
@@ -101,10 +97,10 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
 
         service_client = ServiceClient()
         try:
-            _service = context_client.GetService(service_id) #obtener info de un servicio si existe
+            _service = context_client.GetService(service_id)
         except: # pylint: disable=bare-except
             # pylint: disable=no-member
-            service_request = Service() # sino se crea un nuevo servicio
+            service_request = Service()
             service_request.service_id.CopyFrom(service_id)
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
             service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
@@ -113,7 +109,6 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
         service_request = Service()
         service_request.CopyFrom(_service)
 
-#actualiza el servicio con la info de la slice
         # pylint: disable=no-member
         copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids)
         copy_constraints(request.slice_constraints, service_request.service_constraints)
@@ -167,11 +162,11 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
         slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
         context_client.SetSlice(slice_active)
 
-        service_client.close() #liberar recursos, que es realmente?
-        context_client.close() #db teraflow
+        service_client.close()
+        context_client.close()
         return slice_id
 
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) #agrega funcionalidades de metrica y seguridad
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def CreateSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
         #try:
         #    slice_ = context_client.GetSlice(request.slice_id)
@@ -201,7 +196,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty:
-        context_client = ContextClient() #coge la info de una slice
+        context_client = ContextClient()
         try:
             _slice = context_client.GetSlice(request)
         except: # pylint: disable=bare-except
@@ -210,11 +205,8 @@ class SliceServiceServicerImpl(SliceServiceServicer): # Implementa el servicio g
 
         _slice_rw = Slice()
         _slice_rw.CopyFrom(_slice)
-        #cambia el status
         _slice_rw.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member
         context_client.SetSlice(_slice_rw)
-#elimina la slice considerando si es interdominio o no, y desagrupa la slice eliminada
-        #elimina los servicios asociados a la slice
         if is_inter_domain(context_client, _slice.slice_endpoint_ids):
             interdomain_client = InterdomainClient()
             slice_id = interdomain_client.DeleteSlice(request)
diff --git a/src/slice/service/slice_grouper/SliceGrouper.py b/src/slice/service/slice_grouper/SliceGrouper.py
index d59531a1b10541db85019c70509a19b82f9a68ca..11aa9bb58dad37d229717f6c7135aec0602b7968 100644
--- a/src/slice/service/slice_grouper/SliceGrouper.py
+++ b/src/slice/service/slice_grouper/SliceGrouper.py
@@ -14,7 +14,7 @@
 
 import logging, pandas, threading
 from typing import Dict, Optional, Tuple
-from sklearn.cluster import KMeans #algoritmo de agrupamiento de scikit-learn (biblio de aprendizaje automatico)
+from sklearn.cluster import KMeans
 from common.proto.context_pb2 import Slice
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from .Constants import SLICE_GROUPS
@@ -27,30 +27,30 @@ LOGGER = logging.getLogger(__name__)
 
 class SliceGrouper:
     def __init__(self) -> None:
-        self._lock = threading.Lock() #controla el acceso concurrente
-        self._is_enabled = is_slice_grouping_enabled() #esta habilitado el agrupamiento de slices?
+        self._lock = threading.Lock()
+        self._is_enabled = is_slice_grouping_enabled()
         LOGGER.info('Slice Grouping: {:s}'.format('ENABLED' if self._is_enabled else 'DISABLED'))
         if not self._is_enabled: return
 
-        metrics_exporter = MetricsExporter() #instancia de la clase 
+        metrics_exporter = MetricsExporter()
         metrics_exporter.create_table()
 
-        self._slice_groups = create_slice_groups(SLICE_GROUPS) #grupos de slices
+        self._slice_groups = create_slice_groups(SLICE_GROUPS)
 
         # Initialize and fit K-Means with the pre-defined clusters we want, i.e., one per slice group
-        df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps']) #data frame con info de los grupos
-        k_means = KMeans(n_clusters=df_groups.shape[0]) #modelo utilizado para el agrupamiento
+        df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps'])
+        k_means = KMeans(n_clusters=df_groups.shape[0])
         k_means.fit(df_groups[['availability', 'capacity_gbps']])
         df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])
         self._k_means = k_means
         self._df_groups = df_groups
 
-        self._group_mapping : Dict[str, Dict] = { #Dict = dictionary
+        self._group_mapping : Dict[str, Dict] = {
             group['name']:{k:v for k,v in group.items() if k != 'name'}
-            for group in list(df_groups.to_dict('records')) #mapeo de nombres de grupo a sus atributos
+            for group in list(df_groups.to_dict('records'))
         }
 
-        label_to_group = {} #mapeo de etiquetas a nombres de grupo
+        label_to_group = {}
         for group_name,group_attrs in self._group_mapping.items():
             label = group_attrs['label']
             availability = group_attrs['availability']
@@ -60,7 +60,7 @@ class SliceGrouper:
             label_to_group[label] = group_name
         self._label_to_group = label_to_group
 
-    def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]: #selecciona un grupo para una slice
+    def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]:
         with self._lock:
             grouping_parameters = get_slice_grouping_parameters(slice_obj)
             LOGGER.debug('[_select_group] grouping_parameters={:s}'.format(str(grouping_parameters)))
@@ -78,16 +78,16 @@ class SliceGrouper:
             return group_name, availability, capacity_gbps
 
     @property
-    def is_enabled(self): return self._is_enabled #indica si el agrupamiento de slices esta habilitado
+    def is_enabled(self): return self._is_enabled
 
-    def group(self, slice_obj : Slice) -> bool: #determina el grupo al que debe pertenecer la slice
+    def group(self, slice_obj : Slice) -> bool:
         LOGGER.debug('[group] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
         selected_group = self._select_group(slice_obj)
         LOGGER.debug('[group] selected_group={:s}'.format(str(selected_group)))
         if selected_group is None: return False
         return add_slice_to_group(slice_obj, selected_group)
 
-    def ungroup(self, slice_obj : Slice) -> bool: # desagrupa la slice de un grupo
+    def ungroup(self, slice_obj : Slice) -> bool:
         LOGGER.debug('[ungroup] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
         selected_group = self._select_group(slice_obj)
         LOGGER.debug('[ungroup] selected_group={:s}'.format(str(selected_group)))
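
As background for the SliceGrouper changes: the constructor fits one K-Means cluster per predefined slice group, and _select_group later maps a slice's (availability, capacity_gbps) pair to the nearest group. A standalone sketch of that fit/predict round trip is shown below; the group names and figures are made up, since the real values come from Constants.SLICE_GROUPS, which this diff does not touch.

    # Illustrative fit/predict round trip mirroring SliceGrouper.__init__ and
    # _select_group. The groups below are invented for the example.
    import pandas
    from sklearn.cluster import KMeans

    SLICE_GROUPS = [('gold', 99.99, 100.0), ('silver', 99.9, 10.0), ('bronze', 99.0, 1.0)]

    df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps'])
    k_means = KMeans(n_clusters=df_groups.shape[0], n_init=10)
    k_means.fit(df_groups[['availability', 'capacity_gbps']])
    df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])

    # A requested slice is mapped to the nearest predefined group.
    requested = pandas.DataFrame([(99.95, 12.0)], columns=['availability', 'capacity_gbps'])
    label = int(k_means.predict(requested)[0])
    print(df_groups.loc[df_groups['label'] == label, 'name'].iloc[0])  # expected: 'silver'
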
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index eba758ff333c644d102c7999362dcfb95026b194..75f036befd4bed3bb3bd743b9f423bf21c014e55 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -12,8 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import base64, json, logging
-import traceback #, re
+import base64, json, logging #, re
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
 from common.proto.context_pb2 import ContextList, Empty, TopologyId, TopologyList
 from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
@@ -114,7 +113,6 @@ def home():
     except Exception as e: # pylint: disable=broad-except
         LOGGER.exception('Descriptor load failed')
         flash(f'Descriptor load failed: `{str(e)}`', 'danger')
-        traceback.print_exc()  # Agregar esta línea para imprimir el traceback completo
     finally:
         context_client.close()
         device_client.close()
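
The traceback import and print_exc() call removed above were redundant: LOGGER.exception() already attaches the active exception's traceback to the log record, so the except branch keeps the full stack trace. A small demonstration of that standard-library behaviour:

    # logging.Logger.exception() appends the current traceback to the record,
    # so a separate traceback.print_exc() call adds nothing.
    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOGGER = logging.getLogger(__name__)

    try:
        raise ValueError('bad descriptor')           # stand-in for the real failure
    except Exception:                                # pylint: disable=broad-except
        LOGGER.exception('Descriptor load failed')   # logs the message and the traceback
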