diff --git a/proto/l3_centralizedattackdetector.proto b/proto/l3_centralizedattackdetector.proto
index e251e6feb9981ae3821e6b580e3b966aebc3a889..fc1eddbeb3995ff8fcb67b35bbd4cb42f2ca8c65 100644
--- a/proto/l3_centralizedattackdetector.proto
+++ b/proto/l3_centralizedattackdetector.proto
@@ -17,17 +17,15 @@ syntax = "proto3";
 import "context.proto";
 
 service L3Centralizedattackdetector {
-  // Sends a greeting
+  // Sends single input to the ML model in the CAD component
   rpc SendInput (L3CentralizedattackdetectorMetrics) returns (Empty) {}
-  // Sends another greeting
-  rpc GetOutput (Empty) returns (L3CentralizedattackdetectorModelOutput) {}
+
+  // Sends a batch of inputs to the ML model in the CAD component
+  rpc SendInputBatch (L3CentralizedattackdetectorModelInput) returns (Empty) {}
 }
 
 message L3CentralizedattackdetectorMetrics {
-	/*
-	Model input sent to the Inferencer by the client
-	There are currently 9 values and 
-	*/
+	// Input sent by the DAD component to the ML model integrated in the CAD component.
 
 	// Machine learning model features
 	float c_pkts_all = 1;
@@ -54,22 +52,13 @@ message L3CentralizedattackdetectorMetrics {
 	float time_end = 20;
 }
 
-message Empty {
-	string message = 1;
+// Collection (batch) of model inputs that will be sent to the model
+message L3CentralizedattackdetectorModelInput {
+	repeated L3CentralizedattackdetectorMetrics metrics = 1;
 }
 
-message L3CentralizedattackdetectorModelOutput {
-	float confidence = 1;
-	string timestamp = 2;
-	string ip_o = 3;	
-	string tag_name = 4;
-	int32 tag = 5;
-	string flow_id = 6;
-	string protocol = 7;
-	string port_d = 8;
-	string ml_id = 9;
-	float time_start = 10;
-	float time_end = 11;
+message Empty {
+	string message = 1;
 }
 
 // Collections or streams?
@@ -77,8 +66,4 @@ message L3CentralizedattackdetectorModelOutput {
 message InputCollection {
 	repeated model_input = 1;
 }
-
-message OutputCollection {
-	repeated model_output = 1;
-}
 */
diff --git a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
index 26874a3264e995d71a6caa9c0a6cad00059597ed..01ae92ae02f409fafc7c2d6bc191f8672be5cb84 100644
--- a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
+++ b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
@@ -22,6 +22,7 @@ import numpy as np
 import onnxruntime as rt
 import logging
 from time import sleep
+import time
 
 from common.proto.l3_centralizedattackdetector_pb2 import Empty
 from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorServicer
@@ -118,6 +119,13 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         self.time_interval_start = None
         self.time_interval_end = None
 
+        # CAD evaluation tests
+        self.cad_inference_times = []
+        self.cad_num_inference_measurements = 100
+
+        # AM evaluation tests
+        self.am_notification_times = []
+
     """
     Create a monitored KPI for a specific service and add it to the Monitoring Client
         -input: 
@@ -479,8 +487,56 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             ]
         )
 
+        # Print input data shape
+        LOGGER.debug("x_data.shape: {}".format(x_data.shape))
+
+        # Get batch size
+        batch_size = x_data.shape[0]
+
+        # Print batch size
+        LOGGER.debug("batch_size: {}".format(batch_size))
+
+        # TEST: Remove later
+        test_batch_size = 1024
+        # duplicate x_data to test_batch_size
+        x_data = np.repeat(x_data, test_batch_size, axis=0)
+
+        LOGGER.debug("x_data.shape: {}".format(x_data.shape))
+
+        inference_time_start = time.perf_counter()
+
+        # Perform inference
         predictions = self.model.run([self.prob_name], {self.input_name: x_data.astype(np.float32)})[0]
 
+        inference_time_end = time.perf_counter()
+
+        # Measure inference time
+        inference_time = inference_time_end - inference_time_start
+        self.cad_inference_times.append(inference_time)
+
+        if len(self.cad_inference_times) > self.cad_num_inference_measurements:
+            inference_times_np_array = np.array(self.cad_inference_times)
+            np.save(f"inference_times_{test_batch_size}.npy", inference_times_np_array)
+
+            avg_inference_time = np.mean(inference_times_np_array)
+            max_inference_time = np.max(inference_times_np_array)
+            min_inference_time = np.min(inference_times_np_array)
+            std_inference_time = np.std(inference_times_np_array)
+            median_inference_time = np.median(inference_times_np_array)
+
+            LOGGER.debug("Average inference time: {}".format(avg_inference_time))
+            LOGGER.debug("Max inference time: {}".format(max_inference_time))
+            LOGGER.debug("Min inference time: {}".format(min_inference_time))
+            LOGGER.debug("Standard deviation inference time: {}".format(std_inference_time))
+            LOGGER.debug("Median inference time: {}".format(median_inference_time))
+
+            with open(f"inference_times_stats_{batch_size}.txt", "w") as f:
+                f.write("Average inference time: {}\n".format(avg_inference_time))
+                f.write("Max inference time: {}\n".format(max_inference_time))
+                f.write("Min inference time: {}\n".format(min_inference_time))
+                f.write("Standard deviation inference time: {}\n".format(std_inference_time))
+                f.write("Median inference time: {}\n".format(median_inference_time))
+
         # Gather the predicted class, the probability of that class and other relevant information required to block the attack
         output_message = {
             "confidence": None,
@@ -537,8 +593,12 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
 
         self.monitor_kpis()
 
+        LOGGER.debug("cryptomining_detector_output: {}".format(cryptomining_detector_output))
+
         # Only notify Attack Mitigator when a cryptomining connection has been detected
         if cryptomining_detector_output["tag_name"] == "Crypto":
+            notification_time_start = time.perf_counter()
+
             logging.info("Crypto attack detected")
 
             # Notify the Attack Mitigator component about the attack
@@ -550,6 +610,35 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                 logging.info("Sending the connection information to the Attack Mitigator component...")
                 message = L3AttackmitigatorOutput(**cryptomining_detector_output)
                 response = self.attackmitigator_client.SendOutput(message)
+                notification_time_end = time.perf_counter()
+
+                self.am_notification_times.append(notification_time_end - notification_time_start)
+
+                LOGGER.debug(f"am_notification_times length: {len(self.am_notification_times)}")
+
+                if len(self.am_notification_times) > 100:
+                    am_notification_times_np_array = np.array(self.am_notification_times)
+                    np.save("am_notification_times.npy", am_notification_times_np_array)
+
+                    avg_notification_time = np.mean(am_notification_times_np_array)
+                    max_notification_time = np.max(am_notification_times_np_array)
+                    min_notification_time = np.min(am_notification_times_np_array)
+                    std_notification_time = np.std(am_notification_times_np_array)
+                    median_notification_time = np.median(am_notification_times_np_array)
+
+                    LOGGER.debug("Average notification time: {}".format(avg_notification_time))
+                    LOGGER.debug("Max notification time: {}".format(max_notification_time))
+                    LOGGER.debug("Min notification time: {}".format(min_notification_time))
+                    LOGGER.debug("Std notification time: {}".format(std_notification_time))
+                    LOGGER.debug("Median notification time: {}".format(median_notification_time))
+
+                    with open("am_notification_times_stats.txt", "w") as f:
+                        f.write("Average notification time: {}\n".format(avg_notification_time))
+                        f.write("Max notification time: {}\n".format(max_notification_time))
+                        f.write("Min notification time: {}\n".format(min_notification_time))
+                        f.write("Std notification time: {}\n".format(std_notification_time))
+                        f.write("Median notification time: {}\n".format(median_notification_time))
+
                 # logging.info("Attack Mitigator notified and received response: ", response.message)  # FIX No message received
                 logging.info("Attack Mitigator notified")
 
diff --git a/webui.sh b/webui.sh
new file mode 100755
index 0000000000000000000000000000000000000000..baa331a35b693617d4ad3daf18de9408b2dfba78
--- /dev/null
+++ b/webui.sh
@@ -0,0 +1,80 @@
+echo "Configuring WebUI DataStores and Dashboards..."
+sleep 3
+
+# INFLUXDB_HOST="monitoringservice"
+# INFLUXDB_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service/monitoringservice -o jsonpath='{.spec.ports[?(@.name=="influxdb")].port}')
+# INFLUXDB_URL="http://${INFLUXDB_HOST}:${INFLUXDB_PORT}"
+# INFLUXDB_USER=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_USER}' | base64 --decode)
+# INFLUXDB_PASSWORD=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_PASSWORD}' | base64 --decode)
+# INFLUXDB_DATABASE=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_DB}' | base64 --decode)
+
+# Exposed through the ingress controller "tfs-ingress"
+GRAFANA_HOSTNAME="127.0.0.1"
+GRAFANA_PORT="80"
+GRAFANA_BASEURL="/grafana"
+
+# Default Grafana credentials
+GRAFANA_USERNAME="admin"
+GRAFANA_PASSWORD="admin"
+
+# Default Grafana API URL
+GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
+
+# Updated Grafana API URL
+GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
+
+echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
+
+echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
+
+# Configure Grafana Admin Password
+# Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
+curl -X PUT -H "Content-Type: application/json" -d '{
+    "oldPassword": "'${GRAFANA_PASSWORD}'",
+    "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
+    "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
+}' ${GRAFANA_URL_DEFAULT}/api/user/password
+echo
+
+# Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
+# TODO: replace user, password and database by variables to be saved
+echo "Creating a datasource..."
+curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+    "access"   : "proxy",
+    "type"     : "postgres",
+    "name"     : "monitoringdb",
+    "url"      : "monitoringservice:8812",
+    "database" : "monitoring",
+    "user"     : "admin",
+    "basicAuth": false,
+    "isDefault": true,
+    "jsonData" : {
+        "sslmode"               : "disable",
+        "postgresVersion"       : 1100,
+        "maxOpenConns"          : 0,
+        "maxIdleConns"          : 2,
+        "connMaxLifetime"       : 14400,
+        "tlsAuth"               : false,
+        "tlsAuthWithCACert"     : false,
+        "timescaledb"           : false,
+        "tlsConfigurationMethod": "file-path",
+        "tlsSkipVerify"         : true
+    },
+    "secureJsonData": {
+        "password": "quest"
+    }
+}' ${GRAFANA_URL_UPDATED}/api/datasources
+echo
+
+# Create Monitoring Dashboard
+# Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
+curl -X POST -H "Content-Type: application/json" \
+-d '@src/webui/grafana_dashboard_psql.json' \
+${GRAFANA_URL_UPDATED}/api/dashboards/db
+echo
+
+DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tf-l3-monit"
+DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+
+printf "\n\n"
\ No newline at end of file