diff --git a/src/l3_attackmitigator/.gitlab-ci.yml b/src/l3_attackmitigator/.gitlab-ci.yml
index 6a936ec1c785ecebddcda520f93c88c527cf1785..e20771744b43a9209c8b9a193a1fb126008bb006 100644
--- a/src/l3_attackmitigator/.gitlab-ci.yml
+++ b/src/l3_attackmitigator/.gitlab-ci.yml
@@ -36,7 +36,7 @@ unit_test l3_attackmitigator:
     - docker ps -a
     - docker port $IMAGE_NAME
     - docker logs $IMAGE_NAME
-    - docker exec -i $IMAGE_NAME bash -c "pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py"
+    - docker exec -i $IMAGE_NAME bash -c "pytest --log-level=DEBUG --verbose -o log_cli=true $IMAGE_NAME/tests/test_unitary.py"
   after_script:
     - docker stop $IMAGE_NAME
     - docker rm $IMAGE_NAME
diff --git a/src/l3_attackmitigator/service/l3_attackmitigatorService.py b/src/l3_attackmitigator/service/l3_attackmitigatorService.py
index 254457c705e7311b34d5a2172fd208c3a3bf6657..9bfd2e06c774ee19f989004015e6ffbf3e035b87 100644
--- a/src/l3_attackmitigator/service/l3_attackmitigatorService.py
+++ b/src/l3_attackmitigator/service/l3_attackmitigatorService.py
@@ -71,8 +71,8 @@ class l3_attackmitigatorService:
         )  # pylint: disable=maybe-no-member
 
         LOGGER.debug("Service started")
-        print('Setting up')
-        self.l3_attackmitigator_servicer.setupl3_attackmitigator()
+        #print('Setting up')
+        #self.l3_attackmitigator_servicer.setupl3_attackmitigator()
 
     def stop(self):
         LOGGER.debug(
diff --git a/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py b/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
index 660d790bf827469904624189f7b4ec70be746b35..ce365548bc28384d7c6443a611179ae68e0cc0f5 100644
--- a/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
+++ b/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
@@ -30,28 +30,27 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
     def __init__(self):
         LOGGER.debug("Creating Servicer...")
     
-    class Mitigator(L3AttackmitigatorServicer):
-        def send_output(self, request, context):
-            # SEND CONFIDENCE TO MITIGATION SERVER
-            logging.debug("")
-            print("Server received mitigation values...", request.confidence)
-            LAST_VALUE = request.confidence
-            LAST_TAG = request.tag
-            # RETURN OK TO THE CALLER
-            return EmptyMitigator(
-                message=f"OK, received values: {LAST_TAG} with confidence {LAST_VALUE}."
-            )
-
-        def get_mitigation(self, request, context):
-            # GET OR PERFORM MITIGATION STRATEGY
-            logging.debug("")
-            print("Returing mitigation strategy...")
-            k = LAST_VALUE * 2
-            return EmptyMitigator(
-                message=f"Mitigation with double confidence = {k}"
-            )
+    def SendOutput(self, request, context):
+        # SEND CONFIDENCE TO MITIGATION SERVER
+        logging.debug("")
+        print("Server received mitigation values...", request.confidence)
+        LAST_VALUE = request.confidence
+        LAST_TAG = request.tag
+        # RETURN OK TO THE CALLER
+        return EmptyMitigator(
+            message=f"OK, received values: {LAST_TAG} with confidence {LAST_VALUE}."
+        )
 
+    def GetMitigation(self, request, context):
+        # GET OR PERFORM MITIGATION STRATEGY
+        logging.debug("")
+        print("Returning mitigation strategy...")
+        k = LAST_VALUE * 2
+        return EmptyMitigator(
+            message=f"Mitigation with double confidence = {k}"
+        )
 
+    '''
     def serve(self):
         server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
         add_L3AttackmitigatorServicer_to_server(self.Mitigator(), server)
@@ -64,7 +63,7 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
     def setupl3_attackmitigator(self):
         logging.basicConfig()
         self.serve()
-
+    '''
 
 
     
diff --git a/src/l3_attackmitigator/tests/test_unitary.py b/src/l3_attackmitigator/tests/test_unitary.py
index 072fe92c123e8c8a0c36a7a364a93c5f3c8f3523..7ef202f2455d605f33c6ab19f272647009316d20 100644
--- a/src/l3_attackmitigator/tests/test_unitary.py
+++ b/src/l3_attackmitigator/tests/test_unitary.py
@@ -3,6 +3,7 @@ import grpc
 import logging
 import pytest
 import multiprocessing
+import subprocess
 import time
 from l3_attackmitigator.proto.monitoring_pb2 import Kpi, KpiList
 from common.orm.Factory import get_database_backend as get_database, BackendEnum as DatabaseEngineEnum
@@ -21,12 +22,12 @@ port = 10000 + GRPC_SERVICE_PORT  # avoid privileged ports
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-'''
 @pytest.fixture(scope='session')
 def l3_attackmitigator_service():
     _service = l3_attackmitigatorService(
         port=port, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD)
     _service.start()
+    LOGGER.info('Server started on '+str(port))
     yield _service
     _service.stop()
 
@@ -36,23 +37,13 @@ def l3_attackmitigator_client(l3_attackmitigator_service):
     _client = l3_attackmitigatorClient(address='127.0.0.1', port=port)
     yield _client
     _client.close()
-'''
+
 
 def test_demo():
-    print('Demo Test')
+    LOGGER.info('Demo Test')
     pass
 
-def test_grpc_server():
-    '''
-    print('Starting AM')
-    _service = l3_attackmitigatorService(
-        port=port, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD)
-    p1 = multiprocessing.Process(target=_service.start, args=())
-    #_service.start()
-    p1.start()
-    '''
-    print('Test Started')
-    time.sleep(10)
+def test_grpc_server(l3_attackmitigator_service):
     output_message = {
             "confidence": 0.8,
             "timestamp": "date",
@@ -68,16 +59,14 @@ def test_grpc_server():
         }
     
     def open_channel(input_information):
-        with grpc.insecure_channel("localhost:10002") as channel:
+        LOGGER.info(f"localhost:{port}")
+        with grpc.insecure_channel(f"localhost:{port}") as channel:
             stub = L3AttackmitigatorStub(channel)
             response = stub.SendOutput(input_information)
-            print("Inferencer send_input sent and received: ",response.message)
+            LOGGER.info("Inferencer send_input sent and received: "+str(response.message))
     try:
         open_channel(Output(**output_message))
+        LOGGER.info('Success!')
     except:
-        #p1.terminate()
-        assert 0=="Couldn't open channel"
+        assert 0=="GRPC server failed"
 
-    #p1.terminate()
-    print('Test Stopped')
-    #_service.stop()
diff --git a/src/l3_centralizedattackdetector/.gitlab-ci.yml b/src/l3_centralizedattackdetector/.gitlab-ci.yml
index deda06a69948a2915f761e262ef016a385b1bde3..aaebe01e4dce047b8c84a515283231d06cd26dab 100644
--- a/src/l3_centralizedattackdetector/.gitlab-ci.yml
+++ b/src/l3_centralizedattackdetector/.gitlab-ci.yml
@@ -34,8 +34,9 @@ unit_test l3_centralizedattackdetector:
     - docker ps -a
     - sleep 5
     - docker ps -a
+    - docker port $IMAGE_NAME
     - docker logs $IMAGE_NAME
-    - docker exec -i $IMAGE_NAME bash -c "pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py"
+    - docker exec -i $IMAGE_NAME bash -c "pytest --log-level=DEBUG --verbose -o log_cli=true $IMAGE_NAME/tests/test_unitary.py"
   after_script:
     - docker stop $IMAGE_NAME
     - docker rm $IMAGE_NAME
diff --git a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorService.py b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorService.py
index 1bcb50e1e07a1115b457921b1a965791ba4437b7..dd4a2fce9d078645c08a26090f209cc2425698e2 100644
--- a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorService.py
+++ b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorService.py
@@ -71,7 +71,7 @@ class l3_centralizedattackdetectorService:
         )  # pylint: disable=maybe-no-member
 
         LOGGER.debug("Service started")
-        self.l3_centralizedattackdetector_servicer.setup_l3_centralizedattackdetector()
+        #self.l3_centralizedattackdetector_servicer.setup_l3_centralizedattackdetector()
 
     def stop(self):
         LOGGER.debug(
diff --git a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
index 4ff4f58eebe1e1c5adbe5aa5cd696139a93d1641..993198c5e2f641ee0d0669e6d1b2d259afea14bc 100644
--- a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
+++ b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
@@ -38,90 +38,88 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
 
     def __init__(self):
         LOGGER.debug("Creating Servicer...")
-    
-    class CAD(L3CentralizedattackdetectorServicer):
-        def __init__(self, ml_model):
-            self.ml_model = ml_model
-
-        def send_input(self, request, context):
-            # PERFORM INFERENCE WITH SENT INPUTS
-            logging.debug("")
-            print("Inferencing ...")
-
-            # STORE VALUES
-            INFERENCE_VALUES.append(request)
-
-            # MAKE INFERENCE
-            output = self.make_inference(request)
-
-            # SEND INFO TO MITIGATION SERVER
-            try:
-                with grpc.insecure_channel("localhost:10002") as channel:
-                    stub = l3_attackmitigatorStub(channel)
-                    print("Sending to mitigator...")
-                    response = stub.SendOutput(output)
-                    print("Sent output to mitigator and received: ", response.message)
-
-                # RETURN "OK" TO THE CALLER
-                return Empty(
-                    message="OK, information received and mitigator notified"
-                )
-            except:
-                print('Couldnt find l3_attackmitigator')
-                return Empty(
-                        message="Mitigator Not found"
-                )
-
-        def make_inference(self, request):
-            # ML MODEL
-            # new_predictions = model.predict_proba(list(new_connections.values()))
-            predictions = self.ml_model.predict_proba(
-                [
-                    [
-                        request.n_packets_server_seconds,
-                        request.n_packets_client_seconds,
-                        request.n_bits_server_seconds,
-                        request.n_bits_client_seconds,
-                        request.n_bits_server_n_packets_server,
-                        request.n_bits_client_n_packets_client,
-                        request.n_packets_server_n_packets_client,
-                        request.n_bits_server_n_bits_client,
-                    ]
-                ]
+        with open(MODEL_FILE, "rb") as f:
+            self.ml_model = pkl.load(f)
+
+    def SendInput(self, request, context):
+        # PERFORM INFERENCE WITH SENT INPUTS
+        logging.debug("")
+        print("Inferencing ...")
+
+        # STORE VALUES
+        INFERENCE_VALUES.append(request)
+
+        # MAKE INFERENCE
+        output = self.make_inference(request)
+
+        # SEND INFO TO MITIGATION SERVER
+        try:
+            with grpc.insecure_channel("localhost:10002") as channel:
+                stub = L3AttackmitigatorStub(channel)
+                print("Sending to mitigator...")
+                response = stub.SendOutput(output)
+                print("Sent output to mitigator and received: ", response.message)
+
+            # RETURN "OK" TO THE CALLER
+            return Empty(
+                message="OK, information received and mitigator notified"
+            )
+        except:
+            print("Couldn't find l3_attackmitigator")
+            return Empty(
+                    message="Mitigator Not found"
             )
-            # Output format
-            output_message = {
-                "confidence": None,
-                "timestamp": datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
-                "ip_o": request.ip_o,
-                "tag_name": None,
-                "tag": None,
-                "flow_id": request.flow_id,
-                "protocol": request.protocol,
-                "port_d": request.port_d,
-                "ml_id": "RandomForest",
-                "time_start": request.time_start,
-                "time_end": request.time_end,
-            }
-            if predictions[0][1] >= 0.5:
-                output_message["confidence"] = predictions[0][1]
-                output_message["tag_name"] = "Crypto"
-                output_message["tag"] = 1
-            else:
-                output_message["confidence"] = predictions[0][0]
-                output_message["tag_name"] = "Normal"
-                output_message["tag"] = 0
-
-            return Output(**output_message)
-
-        def get_output(self, request, context):
-            logging.debug("")
-            print("Returing inference output...")
-            k = np.multiply(INFERENCE_VALUES, [2])
-            k = np.sum(k)
-            return self.make_inference(k)
-
 
+    def make_inference(self, request):
+        # ML MODEL
+        # new_predictions = model.predict_proba(list(new_connections.values()))
+        predictions = self.ml_model.predict_proba(
+            [
+                [
+                    request.n_packets_server_seconds,
+                    request.n_packets_client_seconds,
+                    request.n_bits_server_seconds,
+                    request.n_bits_client_seconds,
+                    request.n_bits_server_n_packets_server,
+                    request.n_bits_client_n_packets_client,
+                    request.n_packets_server_n_packets_client,
+                    request.n_bits_server_n_bits_client,
+                ]
+            ]
+        )
+        # Output format
+        output_message = {
+            "confidence": None,
+            "timestamp": datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
+            "ip_o": request.ip_o,
+            "tag_name": None,
+            "tag": None,
+            "flow_id": request.flow_id,
+            "protocol": request.protocol,
+            "port_d": request.port_d,
+            "ml_id": "RandomForest",
+            "time_start": request.time_start,
+            "time_end": request.time_end,
+        }
+        if predictions[0][1] >= 0.5:
+            output_message["confidence"] = predictions[0][1]
+            output_message["tag_name"] = "Crypto"
+            output_message["tag"] = 1
+        else:
+            output_message["confidence"] = predictions[0][0]
+            output_message["tag_name"] = "Normal"
+            output_message["tag"] = 0
+
+        return Output(**output_message)
+
+    def GetOutput(self, request, context):
+        logging.debug("")
+        print("Returning inference output...")
+        k = np.multiply(INFERENCE_VALUES, [2])
+        k = np.sum(k)
+        return self.make_inference(k)
+
+    '''
     def setup_l3_centralizedattackdetector(self):
         print('Starting CAD')
         with open(MODEL_FILE, "rb") as f:
@@ -138,7 +136,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         reply = Empty()
         LOGGER.debug('DetectAttack reply: {}'.format(str(reply)))
         return reply
-
+    '''
 
 
 
diff --git a/src/l3_centralizedattackdetector/tests/test_unitary.py b/src/l3_centralizedattackdetector/tests/test_unitary.py
index 8e2e4aaede0b72c8f4b2103b189866793f3b1fb9..e7cabcba5bf72ee95a4724e2193695879e6df75f 100644
--- a/src/l3_centralizedattackdetector/tests/test_unitary.py
+++ b/src/l3_centralizedattackdetector/tests/test_unitary.py
@@ -2,9 +2,6 @@ import copy
 import grpc
 import logging
 import pytest
-import multiprocessing
-from time import sleep
-from common.orm.Factory import get_database_backend as get_database, BackendEnum as DatabaseEngineEnum
 from l3_centralizedattackdetector.Config import GRPC_SERVICE_PORT, GRPC_MAX_WORKERS, GRPC_GRACE_PERIOD
 from l3_centralizedattackdetector.client.l3_centralizedattackdetectorClient import l3_centralizedattackdetectorClient
 from l3_centralizedattackdetector.service.l3_centralizedattackdetectorService import l3_centralizedattackdetectorService
@@ -20,17 +17,10 @@ port = 10000 + GRPC_SERVICE_PORT  # avoid privileged ports
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-
 @pytest.fixture(scope='session')
-def database():
-    _database = get_database(engine=DatabaseEngineEnum.INMEMORY)
-    return _database
-
-
-@pytest.fixture(scope='session')
-def l3_centralizedattackdetector_service(database):
+def l3_centralizedattackdetector_service():
     _service = l3_centralizedattackdetectorService(
-        database, port=port, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD)
+        port=port, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD)
     _service.start()
     yield _service
     _service.stop()
@@ -43,9 +33,10 @@ def l3_centralizedattackdetector_client(l3_centralizedattackdetector_service):
     _client.close()
 
 def test_demo():
+    LOGGER.info('Demo Test')
     pass
 
-def test_system():
+def test_system(l3_centralizedattackdetector_service):
     inference_information = {
             "n_packets_server_seconds": 5.0,
             "n_packets_client_seconds": 5.0,
@@ -65,30 +56,13 @@ def test_system():
             "time_end": 10.0,
     }
     def open_channel(input_information):
-        with grpc.insecure_channel("localhost:10001") as channel:
+        LOGGER.info(f"localhost:{port}")
+        with grpc.insecure_channel(f"localhost:{port}") as channel:
             stub = L3CentralizedattackdetectorStub(channel)
-            response = stub.send_input(ModelInput(**input_information))
-            print("Cad send_input sent and received: ", response.message)
-
-    #print('Starting AM')
-    #am_service = l3_attackmitigatorService(
-    #    database, port=port+1, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD)
-    #p1 = multiprocessing.Process(target=am_service.start, args=())
-    #p1.start()
-    #sleep(5)
-    cad_service = l3_centralizedattackdetectorService(
-        database, port=port, max_workers=GRPC_MAX_WORKERS, grace_period=GRPC_GRACE_PERIOD)
-    p2 = multiprocessing.Process(
-        target=cad_service.start, args=())
-    p2.start()
-    sleep(10)
-    print('All started!')
+            response = stub.SendInput(input_information)
+            LOGGER.info("Cad send_input sent and received: "+str(response.message))
     try:
-        open_channel(inference_information)
+        open_channel(ModelInput(**inference_information))
     except:
-        p2.terminate()
-        assert 0=="Couldn't open channel"
-    #p1.terminate()
-    p2.terminate()
-    print('All Done!')
+        assert 0=="GRPC server failed"