diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml
index 4447a1427980be6554228087924bf8e4ca775758..06ac823a169c2b08f46a225db3fe04defe7e87f4 100644
--- a/manifests/monitoringservice.yaml
+++ b/manifests/monitoringservice.yaml
@@ -36,7 +36,7 @@ spec:
         - containerPort: 9192
         env:
         - name: LOG_LEVEL
-          value: "INFO"
+          value: "DEBUG"
         envFrom:
         - secretRef:
             name: qdb-data
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 3fa4a6e0dc256ba964fd4ee26a8b7095bb2303f4..801c06f52c8cb7d725ac4071e6d5fc99a504a291 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -36,7 +36,7 @@ spec:
         - containerPort: 9192
         env:
         - name: LOG_LEVEL
-          value: "INFO"
+          value: "DEBUG"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:3030"]
diff --git a/my_deploy.sh b/my_deploy.sh
index 6f0e64afe311b8e56446caabfac6329024c207a9..e0e4415d46badcc1cab527ac936c43ada79dc7b0 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -1,93 +1,29 @@
-#!/bin/bash
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
 # ----- TeraFlowSDN ------------------------------------------------------------
-
-# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
-
-# Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator"
-
-# Set the tag you want to use for your images.
+export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator l3_attackmitigator l3_centralizedattackdetector"
 export TFS_IMAGE_TAG="dev"
-
-# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs"
-
-# Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
-
-# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
-
-# Disable skip-build flag to rebuild the Docker images.
 export TFS_SKIP_BUILD=""
 
-
 # ----- CockroachDB ------------------------------------------------------------
-
-# Set the namespace where CockroackDB will be deployed.
 export CRDB_NAMESPACE="crdb"
-
-# Set the database username to be used by Context.
 export CRDB_USERNAME="tfs"
-
-# Set the database user's password to be used by Context.
 export CRDB_PASSWORD="tfs123"
-
-# Set the database name to be used by Context.
 export CRDB_DATABASE="tfs"
-
-# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
-# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
 export CRDB_DEPLOY_MODE="single"
-
-# Disable flag for dropping database, if exists.
 export CRDB_DROP_DATABASE_IF_EXISTS=""
-
-# Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""
 
-
 # ----- NATS -------------------------------------------------------------------
-
-# Set the namespace where NATS will be deployed.
 export NATS_NAMESPACE="nats"
-
-# Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
 
-
 # ----- QuestDB ----------------------------------------------------------------
-
-# If not already set, set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE="qdb"
-
-# If not already set, set the database username to be used by Monitoring.
 export QDB_USERNAME="admin"
-
-# If not already set, set the database user's password to be used by Monitoring.
 export QDB_PASSWORD="quest"
-
-# If not already set, set the table name to be used by Monitoring.
 export QDB_TABLE="tfs_monitoring"
-
-## If not already set, disable flag for dropping table if exists.
-#export QDB_DROP_TABLE_IF_EXISTS=""
-
-# If not already set, disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
diff --git a/proto/l3_centralizedattackdetector.proto b/proto/l3_centralizedattackdetector.proto
index cefab931401e49dd6650815b5831dd8ba6d14268..b1b50333cd21cf0f5f03c8bb0425364610ab3a76 100644
--- a/proto/l3_centralizedattackdetector.proto
+++ b/proto/l3_centralizedattackdetector.proto
@@ -22,6 +22,9 @@ service L3Centralizedattackdetector {
 
   // Sends a batch of inputs to the ML model in the CAD component
   rpc SendInputBatch (L3CentralizedattackdetectorModelInput) returns (Empty) {}
+
+  // Returns the list of features used by the ML model in the CAD component, as requested by the DAD
+  rpc SendFeatures (Empty) returns (AutoFeatures) {}
 }
 
 message Feature {
@@ -46,18 +49,7 @@ message L3CentralizedattackdetectorMetrics {
 	
 	repeated Feature features = 1;	
 	ConnectionMetadata connection_metadata = 2;
-	/*
-	string ip_o = 2;
-	string port_o = 3;
-	string ip_d = 4;
-	string port_d = 5;
-	string flow_id = 6;
-	context.ServiceId service_id = 7;
-	context.EndPointId endpoint_id = 8;
-	string protocol = 9;
-	float time_start = 10;
-	float time_end = 11;
-	*/
+
 }
 
 message ConnectionMetadata {
@@ -73,6 +65,11 @@ message ConnectionMetadata {
 	float time_end = 10;
 }
 
+// Collection of float values representing the ML features used by the model
+message AutoFeatures {
+	repeated float autoFeatures = 1;
+}
+
 // Collection (batch) of model inputs that will be sent to the model
 message L3CentralizedattackdetectorModelInput {
 	repeated L3CentralizedattackdetectorMetrics metrics = 1;
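
As an illustration of the new RPC, a DAD-side call to SendFeatures could look roughly as follows; the module paths, stub name, and CAD address are assumptions based on the usual protoc-generated naming, not values taken from this repository:

    # Hypothetical client-side call to the new SendFeatures RPC (module paths and address assumed)
    import grpc
    from common.proto.l3_centralizedattackdetector_pb2 import Empty
    from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorStub

    def fetch_auto_features(cad_address: str = 'localhost:10001') -> list:
        # Open a channel to the CAD component and request the list of ML features
        with grpc.insecure_channel(cad_address) as channel:
            stub = L3CentralizedattackdetectorStub(channel)
            reply = stub.SendFeatures(Empty())   # AutoFeatures message
            return list(reply.autoFeatures)      # repeated float -> Python list of floats
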
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 1987be15ddef49f1756909ce9203d1aaa574d6f0..1f4b202eb1b43d40eea21d3dd5d460744f8c3869 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -127,6 +127,9 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
 
             # TODO: use of datastores (might be virtual ones) to enable rollbacks
             resources_to_set, resources_to_delete = compute_rules_to_add_delete(device, request)
+
+            for resource in resources_to_set:
+                LOGGER.debug('Resource to set: %s', resource)
 
             errors = []
             errors.extend(configure_rules(device, driver, resources_to_set))
diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py
index 7dd61085b0f492d7a1d7873a9e1fe3f73a7b407c..45ab0de1bebc6ebab47acecdea2d3a10030cdcb0 100644
--- a/src/device/service/Tools.py
+++ b/src/device/service/Tools.py
@@ -189,6 +189,9 @@ def compute_rules_to_add_delete(
 
 def configure_rules(device : Device, driver : _Driver, resources_to_set : List[Tuple[str, Any]]) -> List[str]:
     if len(resources_to_set) == 0: return []
+
+    for resource_key, resource_value in resources_to_set:
+        LOGGER.debug('Setting config rule: %s = %s', resource_key, resource_value)
 
     results_setconfig = driver.SetConfig(resources_to_set)
     results_setconfig = [
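
For illustration, the entries logged by the new debug line are the (resource_key, resource_value) tuples from the List[Tuple[str, Any]] signature above; the key and value below are invented purely as an example:

    # Hypothetical shape of an entry in resources_to_set (key and value invented for illustration)
    resources_to_set = [
        ('/acl_ruleset[DROP-TCP]', '{"type": "ACLRULETYPE_IPV4", "action": "drop"}'),
    ]
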
diff --git a/src/device/service/driver_api/DriverFactory.py b/src/device/service/driver_api/DriverFactory.py
index 2e89bd4c565d425dab7556d30dc82151271fcb3b..9b30e86d29562ddfbf73f714c78adbaedef05bd4 100644
--- a/src/device/service/driver_api/DriverFactory.py
+++ b/src/device/service/driver_api/DriverFactory.py
@@ -69,6 +69,7 @@ class DriverFactory:
 
             field_candidate_driver_classes = set()
             for field_value in field_values:
+                LOGGER.info("field_value: %s", field_value)
                 if field_enum_values is not None and field_value not in field_enum_values:
                     raise UnsupportedFilterFieldValueException(field_name, field_value, field_enum_values)
                 field_indice_drivers = field_indice.get(field_value)
diff --git a/src/device/service/driver_api/DriverInstanceCache.py b/src/device/service/driver_api/DriverInstanceCache.py
index 1f92059a63889c002eb28ca7eaecc43199f66794..a26a2a856b2f49814d4361cab3e4c34bf4c14d35 100644
--- a/src/device/service/driver_api/DriverInstanceCache.py
+++ b/src/device/service/driver_api/DriverInstanceCache.py
@@ -80,9 +80,13 @@ def get_driver(driver_instance_cache : DriverInstanceCache, device : Device) ->
 
     driver : _Driver = driver_instance_cache.get(device_uuid)
     if driver is not None: return driver
+
+    LOGGER.info('[get_driver] device = {:s}'.format(str(device)))
 
     driver_filter_fields = get_device_driver_filter_fields(device)
     connect_rules = get_connect_rules(device.device_config)
+
+    LOGGER.info('[get_driver] driver_filter_fields = {:s}'.format(str(driver_filter_fields)))
 
     #LOGGER.info('[get_driver] connect_rules = {:s}'.format(str(connect_rules)))
     address  = connect_rules.get('address',  '127.0.0.1')
@@ -106,4 +110,6 @@ def get_driver(driver_instance_cache : DriverInstanceCache, device : Device) ->
 def preload_drivers(driver_instance_cache : DriverInstanceCache) -> None:
     context_client = ContextClient()
     devices = context_client.ListDevices(Empty())
-    for device in devices.devices: get_driver(driver_instance_cache, device)
+    for device in devices.devices:
+        LOGGER.info('[preload_drivers] device = {:s}'.format(str(device)))
+        get_driver(driver_instance_cache, device)
diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py
index 14925f9f78d143cd998065a43afb624b20c04bfb..0388f5db28a92298b5ec98a106d136627e25c31c 100644
--- a/src/device/service/drivers/emulated/EmulatedDriver.py
+++ b/src/device/service/drivers/emulated/EmulatedDriver.py
@@ -134,6 +134,7 @@ class EmulatedDriver(_Driver):
         resolver = anytree.Resolver(pathattr='name')
         with self.__lock:
             for i,resource in enumerate(resources):
+                LOGGER.debug('SetConfig: resource[#{:d}]: {:s}'.format(i, str(resource)))
                 str_resource_name = 'resources[#{:d}]'.format(i)
                 try:
                     chk_type(str_resource_name, resource, (list, tuple))
diff --git a/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py b/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
index 4d41c7d4308dcc624d0c92684157a3ea3cb33e8c..30f6338478bcedae146598f43ea080c4c2eceba6 100644
--- a/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
+++ b/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
@@ -84,9 +84,11 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
         # Set RuleSet for this ACL ConfigRule
         acl_rule_set = acl_config_rule.acl.rule_set
         # TODO: update the following parameters; for instance, add them as parameters of the method configure_acl_rule
-        acl_rule_set.name = "DROP-HTTPS"
+        # acl_rule_set.name = "DROP-HTTPS"
+        acl_rule_set.name = "DROP-TCP"
         acl_rule_set.type = AclRuleTypeEnum.ACLRULETYPE_IPV4
-        acl_rule_set.description = "DROP undesired HTTPS traffic"
+        # acl_rule_set.description = "DROP undesired HTTPS traffic"
+        acl_rule_set.description = "DROP undesired TCP traffic"
 
         # Add ACLEntry to the ACLRuleSet
         acl_entry = acl_rule_set.entries.add()
diff --git a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
index 18afc561b6db99a5237ec1efc41ada501c150c24..9017bec6988e2d56b8b0e24b26e0ab177ca09b64 100644
--- a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
+++ b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
@@ -59,8 +59,10 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         self.inference_results = []
         self.model = rt.InferenceSession(MODEL_FILE)
         
-        meta = self.model.get_modelmeta()
-        LOGGER.debug(meta.custom_metadata_map)
+        self.meta = list(self.model.get_modelmeta().custom_metadata_map.values())
+        self.meta = [int(x) for x in self.meta]
+        self.meta.sort()
+        LOGGER.debug(self.meta)
         LOGGER.debug("Prueba onnx")
         
         self.input_name = self.model.get_inputs()[0].name
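
One way the sorted metadata values computed above (self.meta) could feed the new SendFeatures RPC is sketched below; the import path and the handler body are assumptions, not code taken from this servicer:

    # Hedged sketch of a SendFeatures handler; AutoFeatures import path assumed
    from common.proto.l3_centralizedattackdetector_pb2 import AutoFeatures

    # Method that could be added to the servicer class
    def SendFeatures(self, request, context):
        features = AutoFeatures()
        # self.meta holds the sorted int values read from the ONNX custom metadata map
        features.autoFeatures.extend(float(x) for x in self.meta)
        return features
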
diff --git a/src/l3_centralizedattackdetector/service/ml_model/crypto_auto_features.onnx b/src/l3_centralizedattackdetector/service/ml_model/crypto_auto_features.onnx
index 99df0c295323fefd605392367c367269aefe77b2..005b1c3e220dde4657e12fd34eab98a3649f6353 100644
Binary files a/src/l3_centralizedattackdetector/service/ml_model/crypto_auto_features.onnx and b/src/l3_centralizedattackdetector/service/ml_model/crypto_auto_features.onnx differ
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 622abeee860cdb6ce8153b7def9fb91ea1117277..763852e43a4de31acf4de2949c963953500bc33e 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -16,7 +16,7 @@ import grpc, json, logging
 from typing import Optional
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.method_wrappers.ServiceExceptions import AlreadyExistsException, InvalidArgumentException
-from common.proto.context_pb2 import Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum
+from common.proto.context_pb2 import Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, DeviceId, Device
 from common.proto.pathcomp_pb2 import PathCompRequest
 from common.proto.service_pb2_grpc import ServiceServiceServicer
 from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
@@ -25,6 +25,10 @@ from pathcomp.frontend.client.PathCompClient import PathCompClient
 from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
 from .task_scheduler.TaskScheduler import TasksScheduler
 from .tools.ContextGetters import get_service
+from device.client.DeviceClient import DeviceClient
+from common.tools.object_factory.Device import json_device_id
+from google.protobuf.json_format import MessageToJson
+from common.tools.context_queries.Device import get_device
 
 LOGGER = logging.getLogger(__name__)
 
@@ -34,6 +38,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
     def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None:
         LOGGER.debug('Creating Servicer...')
         self.service_handler_factory = service_handler_factory
+        self.device_client = DeviceClient()
         LOGGER.debug('Servicer Created')
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
@@ -104,10 +109,38 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
         del service.service_constraints[:]  # pylint: disable=no-member
         for constraint in request.service_constraints:
             service.service_constraints.add().CopyFrom(constraint)  # pylint: disable=no-member
-
+
         del service.service_config.config_rules[:]  # pylint: disable=no-member
         for config_rule in request.service_config.config_rules:
             service.service_config.config_rules.add().CopyFrom(config_rule) # pylint: disable=no-member
+
+            # Forward ACL to Device
+            if config_rule.acl.rule_set.name == "DROP-TCP":
+                LOGGER.debug(f"[ACL - {config_rule.acl.rule_set.name}]")
+
+                r1 = context_client.GetDevice(DeviceId(**json_device_id('R1-EMU')))
+
+                # dev = Device()
+                # LOGGER.debug("Sending test device to Device")
+                # #device_id = context_client.SetDevice(dev)
+                # #dev = get_device(context_client, device_id.device_uuid.uuid, rw_copy=True)
+                # dev.CopyFrom(r1)
+                # self.device_client.ConfigureDevice(dev)
+                # LOGGER.debug("Now testing with R1-EMU")
+
+                # Log the original R1-EMU device as a formatted JSON string
+                LOGGER.debug("Original R1-EMU")
+                LOGGER.debug(json.dumps(json.loads(MessageToJson(r1)), indent=4))
+
+                LOGGER.debug("Sending original R1-EMU to Device")
+                self.device_client.ConfigureDevice(r1)
+
+                r1.device_config.config_rules.add().CopyFrom(config_rule)
+                LOGGER.debug("Modified R1-EMU")
+                LOGGER.debug(json.dumps(json.loads(MessageToJson(r1)), indent=4))
+
+                LOGGER.debug("Sending modified R1-EMU to Device")
+                self.device_client.ConfigureDevice(r1)
 
         service_id_with_uuids = context_client.SetService(service)
         service_with_uuids = context_client.GetService(service_id_with_uuids)
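
For reference, the DeviceId built in the ACL-forwarding block above relies on json_device_id('R1-EMU') from common.tools.object_factory.Device; assuming that helper keeps its usual shape, the call expands to roughly:

    # Assumed expansion of json_device_id('R1-EMU'); verify against common.tools.object_factory.Device
    r1 = context_client.GetDevice(DeviceId(**{'device_uuid': {'uuid': 'R1-EMU'}}))
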