diff --git a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
index 70a3e6172b2f45ceacacf5074971e25ab12bdf75..d84e71ce63dee0e61109487b393605b24ad0f32c 100644
--- a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
+++ b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
@@ -36,7 +36,7 @@
 from monitoring.client.MonitoringClient import MonitoringClient
 from common.proto.monitoring_pb2 import Kpi
 from common.tools.timestamp.Converters import timestamp_utcnow_to_float
-from common.proto.context_pb2 import Timestamp, ServiceId, EndPointId, SliceId, DeviceId
+from common.proto.context_pb2 import Timestamp, ServiceId, EndPointId, SliceId, ConnectionId

 from l3_attackmitigator.client.l3_attackmitigatorClient import l3_attackmitigatorClient

@@ -64,15 +64,17 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
     def __init__(self):
         LOGGER.info("Creating Centralized Attack Detector Service")

-        self.inference_values = Queue()
-        self.inference_results = Queue()
+        # self.inference_values = Queue()
+        # self.inference_results = Queue()
+        self.inference_values = []
+        self.inference_results = []
         self.model = rt.InferenceSession(MODEL_FILE)
         self.input_name = self.model.get_inputs()[0].name
         self.label_name = self.model.get_outputs()[0].name
         self.prob_name = self.model.get_outputs()[1].name
         self.monitoring_client = MonitoringClient()
         self.service_ids = []
-        self.monitored_service_ids = Queue()
+        # self.monitored_service_ids = Queue()
         self.monitored_kpis = {
             "l3_security_status": {
                 "kpi_id": None,
@@ -111,22 +113,26 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto

         # Environment variables
         self.CLASSIFICATION_THRESHOLD = os.getenv("CAD_CLASSIFICATION_THRESHOLD", 0.5)
-        self.MONITORED_KPIS_TIME_INTERVAL_AGG = os.getenv("MONITORED_KPIS_TIME_INTERVAL_AGG", 30)
+        self.MONITORED_KPIS_TIME_INTERVAL_AGG = os.getenv("MONITORED_KPIS_TIME_INTERVAL_AGG", 5)

         # Constants
         self.NORMAL_CLASS = 0
         self.CRYPTO_CLASS = 1

-        # start monitoring process
-        self.monitoring_process = Process(
-            target=self.monitor_kpis,
-            args=(
-                self.monitored_service_ids,
-                self.inference_results,
-            ),
-        )
+        # # start monitoring process
+        # self.monitoring_process = Process(
+        #     target=self.monitor_kpis,
+        #     args=(
+        #         self.monitored_service_ids,
+        #         self.inference_results,
+        #     ),
+        # )
         # self.monitoring_process.start()

+        self.kpi_test = None
+        self.time_interval_start = None
+        self.time_interval_end = None
+
     """
     Create a monitored KPI for a specific service and add it to the Monitoring Client
         -input:
@@ -142,7 +148,8 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         service_id,
         device_id,
         endpoint_id,
-        # slice_id,
+        slice_id,
+        connection_id,
         kpi_name,
         kpi_description,
         kpi_sample_type,
@@ -152,7 +159,8 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         kpidescriptor.service_id.service_uuid.uuid = service_id.service_uuid.uuid
         kpidescriptor.device_id.device_uuid.uuid = device_id.device_uuid.uuid
         kpidescriptor.endpoint_id.endpoint_uuid.uuid = endpoint_id.endpoint_uuid.uuid
-        # kpidescriptor.slice_id.slice_uuid.uuid = slice_id.slice_uuid.uuid
+        kpidescriptor.slice_id.slice_uuid.uuid = slice_id.slice_uuid.uuid
+        kpidescriptor.connection_id.connection_uuid.uuid = connection_id.connection_uuid.uuid
         kpidescriptor.kpi_sample_type = kpi_sample_type

         new_kpi = self.monitoring_client.SetKpi(kpidescriptor)
@@ -173,19 +181,20 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         # for now, all the KPIs are created for all the services from which requests are received
         for kpi in self.monitored_kpis:
             # slice_ids_list = self.context_client.ListSliceIds(self.context_id)[0]
-            # # generate random slice_id
-            # slice_id = SliceId()
-            # slice_id.slice_uuid.uuid = str(uuid.uuid4())
+            # generate random slice_id
+            slice_id = SliceId()
+            slice_id.slice_uuid.uuid = str(uuid.uuid4())

-            # generate random device_id
-            device_id = DeviceId()
-            device_id.device_uuid.uuid = str(uuid.uuid4())
+            # generate random connection_id
+            connection_id = ConnectionId()
+            connection_id.connection_uuid.uuid = str(uuid.uuid4())

             created_kpi = self.create_kpi(
                 service_id,
                 device_id,
                 endpoint_id,
-                # slice_id,
+                slice_id,
+                connection_id,
                 kpi,
                 self.monitored_kpis[kpi]["description"].format(service_id=service_id.service_uuid.uuid),
                 self.monitored_kpis[kpi]["kpi_sample_type"],
@@ -193,7 +202,10 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             self.monitored_kpis[kpi]["kpi_id"] = created_kpi.kpi_id
             self.monitored_kpis[kpi]["service_ids"].append(service_id.service_uuid.uuid)

-        self.monitoring_process.start()
+        LOGGER.info("Created KPIs for service {}".format(service_id))
+
+        # LOGGER.info("Starting monitoring process")
+        # self.monitoring_process.start()

     def monitor_kpis(self, service_ids, inference_results):
         self.monitoring_client_test = MonitoringClient()
@@ -368,10 +380,13 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                             "port_d": monitor_inference_results[i]["output"]["port_d"],
                         }

+                        is_unique = True
+
                         for j in range(len(unique_attack_conns_last_time_interval)):
                             if current_attack_conn == unique_attack_conns_last_time_interval[j]:
-                                break
+                                is_unique = False

+                        if is_unique:
                             num_unique_attack_conns_last_time_interval += 1
                             unique_attack_conns_last_time_interval.append(current_attack_conn)

@@ -380,7 +395,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             # L3 unique compromised clients
             kpi_unique_compromised_clients = Kpi()
             kpi_unique_compromised_clients.kpi_id.kpi_id.CopyFrom(
-                self.monitored_kpis["l3_unique_attack_conns"]["kpi_id"]
+                self.monitored_kpis["l3_unique_compromised_clients"]["kpi_id"]
             )

             # get the number of unique compromised clients (grouping by origin IP) of the last aggregation time interval as indicated by the self.MONITORED_KPIS_TIME_INTERVAL_AGG variable
@@ -393,7 +408,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                     and monitor_inference_results[i]["timestamp"] < time_interval_end
                     and monitor_inference_results[i]["output"]["service_id"] == service_id
                     and service_id.service_uuid.uuid
-                    in self.monitored_kpis["l3_unique_attack_conns"]["service_ids"]
+                    in self.monitored_kpis["l3_unique_compromised_clients"]["service_ids"]
                 ):
                     if monitor_inference_results[i]["output"]["tag"] == self.CRYPTO_CLASS:
                         if (
@@ -409,7 +424,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto

             # L3 unique attackers
             kpi_unique_attackers = Kpi()
-            kpi_unique_attackers.kpi_id.kpi_id.CopyFrom(self.monitored_kpis["l3_unique_attack_conns"]["kpi_id"])
+            kpi_unique_attackers.kpi_id.kpi_id.CopyFrom(self.monitored_kpis["l3_unique_attackers"]["kpi_id"])

             # get the number of unique attackers (grouping by destination ip) of the last aggregation time interval as indicated by the self.MONITORED_KPIS_TIME_INTERVAL_AGG variable
             num_unique_attackers_last_time_interval = 0
@@ -420,8 +435,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                     monitor_inference_results[i]["timestamp"] >= time_interval_start
                     and monitor_inference_results[i]["timestamp"] < time_interval_end
                     and monitor_inference_results[i]["output"]["service_id"] == service_id
-                    and service_id.service_uuid.uuid
-                    in self.monitored_kpis["l3_unique_attack_conns"]["service_ids"]
+                    and service_id.service_uuid.uuid in self.monitored_kpis["l3_unique_attackers"]["service_ids"]
                 ):
                     if monitor_inference_results[i]["output"]["tag"] == self.CRYPTO_CLASS:
                         if (
@@ -452,30 +466,274 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             LOGGER.debug("kpi_unique_compromised_clients: {}".format(kpi_unique_compromised_clients))
             LOGGER.debug("kpi_unique_attackers: {}".format(kpi_unique_attackers))

-            _create_kpi_request = KpiDescriptor()
-            _create_kpi_request.kpi_description = "KPI Description Test"
-            _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN
-            _create_kpi_request.device_id.device_uuid.uuid = "DEVUPM"  # pylint: disable=maybe-no-member
-            _create_kpi_request.service_id.service_uuid.uuid = "SERVUPM"  # pylint: disable=maybe-no-member
-            _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "ENDUPM"  # pylint: disable=maybe-no-member
+            kpi_security_status.kpi_value.floatVal = 500
+            kpi_conf.kpi_value.floatVal = 500
+            kpi_unique_attack_conns.kpi_value.floatVal = 500
+            kpi_unique_compromised_clients.kpi_value.floatVal = 500
+            kpi_unique_attackers.kpi_value.floatVal = 500
+
+            try:
+                self.monitoring_client_test.IncludeKpi(kpi_security_status)
+                self.monitoring_client_test.IncludeKpi(kpi_conf)
+                self.monitoring_client_test.IncludeKpi(kpi_unique_attack_conns)
+                self.monitoring_client_test.IncludeKpi(kpi_unique_compromised_clients)
+                self.monitoring_client_test.IncludeKpi(kpi_unique_attackers)
+            except Exception as e:
+                LOGGER.debug("Error sending KPIs to monitoring server: {}".format(e))
+
+            # self.monitoring_client_test.IncludeKpi(kpi_security_status)
+            # self.monitoring_client_test.IncludeKpi(kpi_conf)
+            # self.monitoring_client_test.IncludeKpi(kpi_unique_attack_conns)
+            # self.monitoring_client_test.IncludeKpi(kpi_unique_compromised_clients)
+            # self.monitoring_client_test.IncludeKpi(kpi_unique_attackers)
+
+            LOGGER.debug("KPIs sent to monitoring server")
+
+    def monitor_kpis_test(
+        self,
+    ):
+        monitor_inference_results = self.inference_results
+        monitor_service_ids = self.service_ids
+
+        LOGGER.debug("monitor_inference_results: {}".format(len(monitor_inference_results)))
+        LOGGER.debug("monitor_service_ids: {}".format(len(monitor_service_ids)))
+
+        time_interval = self.MONITORED_KPIS_TIME_INTERVAL_AGG
+
+        # assign the timestamp of the first inference result to the time_interval_start
+        if self.time_interval_start is None:
+            self.time_interval_start = monitor_inference_results[0]["timestamp"]
+            LOGGER.debug("self.time_interval_start: {}".format(self.time_interval_start))
+            # self.time_interval_start = datetime.strptime(self.time_interval_start, "%Y-%m-%d %H:%M:%S.%f")
+
+        # add time_interval to the current time to get the time interval end
+        LOGGER.debug("time_interval: {}".format(time_interval))
+        LOGGER.debug(timedelta(seconds=time_interval))
+        self.time_interval_end = self.time_interval_start + timedelta(seconds=time_interval)
+
+        current_time = datetime.utcnow()
+
+        LOGGER.debug("current_time: {}".format(current_time))
+
+        if current_time >= self.time_interval_end:
+            self.time_interval_start = self.time_interval_end
+            self.time_interval_end = self.time_interval_start + timedelta(seconds=time_interval)
+
+        LOGGER.debug("time_interval_start: {}".format(self.time_interval_start))
+        LOGGER.debug("time_interval_end: {}".format(self.time_interval_end))
+
+        # check if there is at least one inference result in monitor_inference_results in the current time_interval
+        non_empty_time_interval = False
+
+        for i in range(len(monitor_inference_results)):
+            inference_result_timestamp = monitor_inference_results[i]["timestamp"]
+
+            if (
+                inference_result_timestamp >= self.time_interval_start
+                and inference_result_timestamp < self.time_interval_end
+            ):
+                non_empty_time_interval = True
+                break
+
+        if non_empty_time_interval:
+            for service_id in monitor_service_ids:
+                LOGGER.debug("service_id: {}".format(service_id))
+
+                # L3 security status
+                kpi_security_status = Kpi()
+                kpi_security_status.kpi_id.kpi_id.CopyFrom(self.monitored_kpis["l3_security_status"]["kpi_id"])
+
+                # get the output.tag of the ML model of the last aggregation time interval as indicated by the self.MONITORED_KPIS_TIME_INTERVAL_AGG variable
+                outputs_last_time_interval = []
+
+                for i in range(len(monitor_inference_results)):
+                    if (
+                        monitor_inference_results[i]["timestamp"] >= self.time_interval_start
+                        and monitor_inference_results[i]["timestamp"] < self.time_interval_end
+                        and monitor_inference_results[i]["output"]["service_id"] == service_id
+                        and service_id.service_uuid.uuid in self.monitored_kpis["l3_security_status"]["service_ids"]
+                    ):
+                        outputs_last_time_interval.append(monitor_inference_results[i]["output"]["tag"])
+
+                LOGGER.debug("outputs_last_time_interval: {}".format(outputs_last_time_interval))
+
+                # check if all outputs are 0
+                all_outputs_zero = True
+                for output in outputs_last_time_interval:
+                    if output != self.NORMAL_CLASS:
+                        all_outputs_zero = False
+                        break
+
+                if all_outputs_zero:
+                    kpi_security_status.kpi_value.int32Val = 0
+                else:
+                    kpi_security_status.kpi_value.int32Val = 1
+
+                # L3 ML model confidence
+                kpi_conf = Kpi()
+                kpi_conf.kpi_id.kpi_id.CopyFrom(self.monitored_kpis["l3_ml_model_confidence"]["kpi_id"])
+
+                # get the output.confidence of the ML model of the last aggregation time interval as indicated by the self.MONITORED_KPIS_TIME_INTERVAL_AGG variable
+                confidences_normal_last_time_interval = []
+                confidences_crypto_last_time_interval = []
+
+                for i in range(len(monitor_inference_results)):
+                    LOGGER.debug("monitor_inference_results[i]: {}".format(monitor_inference_results[i]))
+
+                    if (
+                        monitor_inference_results[i]["timestamp"] >= self.time_interval_start
+                        and monitor_inference_results[i]["timestamp"] < self.time_interval_end
+                        and monitor_inference_results[i]["output"]["service_id"] == service_id
+                        and service_id.service_uuid.uuid
+                        in self.monitored_kpis["l3_ml_model_confidence"]["service_ids"]
+                    ):
+                        if monitor_inference_results[i]["output"]["tag"] == self.NORMAL_CLASS:
+                            confidences_normal_last_time_interval.append(
+                                monitor_inference_results[i]["output"]["confidence"]
+                            )
+                        elif monitor_inference_results[i]["output"]["tag"] == self.CRYPTO_CLASS:
+                            confidences_crypto_last_time_interval.append(
+                                monitor_inference_results[i]["output"]["confidence"]
+                            )
+                        else:
+                            LOGGER.debug("Unknown tag: {}".format(monitor_inference_results[i]["output"]["tag"]))
+
+                LOGGER.debug("confidences_normal_last_time_interval: {}".format(confidences_normal_last_time_interval))
+                LOGGER.debug("confidences_crypto_last_time_interval: {}".format(confidences_crypto_last_time_interval))
+
+                if kpi_security_status.kpi_value.int32Val == 0:
+                    kpi_conf.kpi_value.floatVal = np.mean(confidences_normal_last_time_interval)
+                else:
+                    kpi_conf.kpi_value.floatVal = np.mean(confidences_crypto_last_time_interval)
+
+                # L3 unique attack connections
+                kpi_unique_attack_conns = Kpi()
+                kpi_unique_attack_conns.kpi_id.kpi_id.CopyFrom(self.monitored_kpis["l3_unique_attack_conns"]["kpi_id"])
+
+                # get the number of unique attack connections (grouping by origin IP, origin port, destination IP, destination port) of the last aggregation time interval as indicated by the self.MONITORED_KPIS_TIME_INTERVAL_AGG variable
+                num_unique_attack_conns_last_time_interval = 0
+                unique_attack_conns_last_time_interval = []
+
+                for i in range(len(monitor_inference_results)):
+                    if (
+                        monitor_inference_results[i]["timestamp"] >= self.time_interval_start
+                        and monitor_inference_results[i]["timestamp"] < self.time_interval_end
+                        and monitor_inference_results[i]["output"]["service_id"] == service_id
+                        and service_id.service_uuid.uuid
+                        in self.monitored_kpis["l3_unique_attack_conns"]["service_ids"]
+                    ):
+                        if monitor_inference_results[i]["output"]["tag"] == self.CRYPTO_CLASS:
+                            current_attack_conn = {
+                                "ip_o": monitor_inference_results[i]["output"]["ip_o"],
+                                "port_o": monitor_inference_results[i]["output"]["port_o"],
+                                "ip_d": monitor_inference_results[i]["output"]["ip_d"],
+                                "port_d": monitor_inference_results[i]["output"]["port_d"],
+                            }
+
+                            is_unique_attack_conn = True
+
+                            for j in range(len(unique_attack_conns_last_time_interval)):
+                                if current_attack_conn == unique_attack_conns_last_time_interval[j]:
+                                    is_unique_attack_conn = False
+
+                            if is_unique_attack_conn:
+                                num_unique_attack_conns_last_time_interval += 1
+                                unique_attack_conns_last_time_interval.append(current_attack_conn)
+
+                kpi_unique_attack_conns.kpi_value.int32Val = num_unique_attack_conns_last_time_interval
+
+                # L3 unique compromised clients
+                kpi_unique_compromised_clients = Kpi()
+                kpi_unique_compromised_clients.kpi_id.kpi_id.CopyFrom(
+                    self.monitored_kpis["l3_unique_compromised_clients"]["kpi_id"]
+                )
+
+                # get the number of unique compromised clients (grouping by origin IP) of the last aggregation time interval as indicated by the self.MONITORED_KPIS_TIME_INTERVAL_AGG variable
+                num_unique_compromised_clients_last_time_interval = 0
+                unique_compromised_clients_last_time_interval = []
+
+                for i in range(len(monitor_inference_results)):
+                    if (
+                        monitor_inference_results[i]["timestamp"] >= self.time_interval_start
+                        and monitor_inference_results[i]["timestamp"] < self.time_interval_end
+                        and monitor_inference_results[i]["output"]["service_id"] == service_id
+                        and service_id.service_uuid.uuid
+                        in self.monitored_kpis["l3_unique_compromised_clients"]["service_ids"]
+                    ):
+                        if monitor_inference_results[i]["output"]["tag"] == self.CRYPTO_CLASS:
+                            if (
+                                monitor_inference_results[i]["output"]["ip_o"]
+                                not in unique_compromised_clients_last_time_interval
+                            ):
+                                unique_compromised_clients_last_time_interval.append(
+                                    monitor_inference_results[i]["output"]["ip_o"]
+                                )
+                                num_unique_compromised_clients_last_time_interval += 1
+
+                kpi_unique_compromised_clients.kpi_value.int32Val = num_unique_compromised_clients_last_time_interval
+
+                # L3 unique attackers
+                kpi_unique_attackers = Kpi()
+                kpi_unique_attackers.kpi_id.kpi_id.CopyFrom(self.monitored_kpis["l3_unique_attackers"]["kpi_id"])

-            new_kpi = self.monitoring_client_test.SetKpi(_create_kpi_request)
-            LOGGER.debug("New KPI: {}".format(new_kpi))
+                # get the number of unique attackers (grouping by destination ip) of the last aggregation time interval as indicated by the self.MONITORED_KPIS_TIME_INTERVAL_AGG variable
+                num_unique_attackers_last_time_interval = 0
+                unique_attackers_last_time_interval = []

-            _include_kpi_request = Kpi()
-            _include_kpi_request.kpi_id.kpi_id.uuid = new_kpi.kpi_id.uuid
-            _include_kpi_request.timestamp.timestamp = timestamp_utcnow_to_float()
-            _include_kpi_request.kpi_value.floatVal = 500
+                for i in range(len(monitor_inference_results)):
+                    if (
+                        monitor_inference_results[i]["timestamp"] >= self.time_interval_start
+                        and monitor_inference_results[i]["timestamp"] < self.time_interval_end
+                        and monitor_inference_results[i]["output"]["service_id"] == service_id
+                        and service_id.service_uuid.uuid in self.monitored_kpis["l3_unique_attackers"]["service_ids"]
+                    ):
+                        if monitor_inference_results[i]["output"]["tag"] == self.CRYPTO_CLASS:
+                            if (
+                                monitor_inference_results[i]["output"]["ip_d"]
+                                not in unique_attackers_last_time_interval
+                            ):
+                                unique_attackers_last_time_interval.append(
+                                    monitor_inference_results[i]["output"]["ip_d"]
+                                )
+                                num_unique_attackers_last_time_interval += 1

-            self.monitoring_client_test.IncludeKpi(_include_kpi_request)
+                kpi_unique_attackers.kpi_value.int32Val = num_unique_attackers_last_time_interval

-            self.monitoring_client.IncludeKpi(kpi_security_status)
-            self.monitoring_client.IncludeKpi(kpi_conf)
-            self.monitoring_client.IncludeKpi(kpi_unique_attack_conns)
-            self.monitoring_client.IncludeKpi(kpi_unique_compromised_clients)
-            self.monitoring_client.IncludeKpi(kpi_unique_attackers)
+                timestamp = Timestamp()
+                timestamp.timestamp = timestamp_utcnow_to_float()
+
+                kpi_security_status.timestamp.CopyFrom(timestamp)
+                kpi_conf.timestamp.CopyFrom(timestamp)
+                kpi_unique_attack_conns.timestamp.CopyFrom(timestamp)
+                kpi_unique_compromised_clients.timestamp.CopyFrom(timestamp)
+                kpi_unique_attackers.timestamp.CopyFrom(timestamp)
+
+                LOGGER.debug("Sending KPIs to monitoring server")
+
+                LOGGER.debug("kpi_security_status: {}".format(kpi_security_status))
+                LOGGER.debug("kpi_conf: {}".format(kpi_conf))
+                LOGGER.debug("kpi_unique_attack_conns: {}".format(kpi_unique_attack_conns))
+                LOGGER.debug("kpi_unique_compromised_clients: {}".format(kpi_unique_compromised_clients))
+                LOGGER.debug("kpi_unique_attackers: {}".format(kpi_unique_attackers))
+
+                try:
+                    self.monitoring_client.IncludeKpi(kpi_security_status)
+                    self.monitoring_client.IncludeKpi(kpi_conf)
+                    self.monitoring_client.IncludeKpi(kpi_unique_attack_conns)
+                    self.monitoring_client.IncludeKpi(kpi_unique_compromised_clients)
+                    self.monitoring_client.IncludeKpi(kpi_unique_attackers)
+                except Exception as e:
+                    LOGGER.debug("Error sending KPIs to monitoring server: {}".format(e))
+
+                # self.monitoring_client_test.IncludeKpi(kpi_security_status)
+                # self.monitoring_client_test.IncludeKpi(kpi_conf)
+                # self.monitoring_client_test.IncludeKpi(kpi_unique_attack_conns)
+                # self.monitoring_client_test.IncludeKpi(kpi_unique_compromised_clients)
+                # self.monitoring_client_test.IncludeKpi(kpi_unique_attackers)

             LOGGER.debug("KPIs sent to monitoring server")
+        else:
+            LOGGER.debug("No KPIs to send to monitoring server")

     """
     Classify connection as standard traffic or cryptomining attack and return results
@@ -551,17 +809,18 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             cryptomining_detector_output = self.make_inference(request)
             logging.info("Inference performed correctly")

-        # Store the results of the inference that will be later used to monitor the KPIs
-        # Protobuff messages are NOT pickable, so we need to serialize them first
-        cryptomining_detector_output_serialized = copy.deepcopy(cryptomining_detector_output)
-        cryptomining_detector_output_serialized["service_id"] = MessageToJson(
-            request.service_id, preserving_proto_field_name=True
-        )
-        cryptomining_detector_output_serialized["endpoint_id"] = MessageToJson(
-            request.endpoint_id, preserving_proto_field_name=True
-        )
+        # # Store the results of the inference that will be later used to monitor the KPIs
+        # # Protobuff messages are NOT pickable, so we need to serialize them first
+        # cryptomining_detector_output_serialized = copy.deepcopy(cryptomining_detector_output)
+        # cryptomining_detector_output_serialized["service_id"] = MessageToJson(
+        #     request.service_id, preserving_proto_field_name=True
+        # )
+        # cryptomining_detector_output_serialized["endpoint_id"] = MessageToJson(
+        #     request.endpoint_id, preserving_proto_field_name=True
+        # )

-        self.inference_results.put({"output": cryptomining_detector_output_serialized, "timestamp": datetime.now()})
+        # self.inference_results.put({"output": cryptomining_detector_output_serialized, "timestamp": datetime.now()})
+        self.inference_results.append({"output": cryptomining_detector_output, "timestamp": datetime.now()})

         service_id = request.service_id
         device_id = request.endpoint_id.device_id
@@ -571,7 +830,29 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         if service_id not in self.service_ids:
             self.create_kpis(service_id, device_id, endpoint_id)
             self.service_ids.append(service_id)
-            self.monitored_service_ids.put(MessageToJson(service_id, preserving_proto_field_name=True))
+            # self.monitored_service_ids.put(MessageToJson(service_id, preserving_proto_field_name=True))
+
+        self.monitor_kpis_test()
+
+        # if self.kpi_test is None:
+        #     _create_kpi_request = KpiDescriptor()
+        #     _create_kpi_request.kpi_description = "KPI Description Test"
+        #     _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN
+        #     _create_kpi_request.device_id.device_uuid.uuid = "DEVUPM"  # pylint: disable=maybe-no-member
+        #     _create_kpi_request.service_id.service_uuid.uuid = "SERVUPM"  # pylint: disable=maybe-no-member
+        #     _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "ENDUPM"  # pylint: disable=maybe-no-member
+        #     _create_kpi_request.connection_id.connection_uuid.uuid = "CONUPM"  # pylint: disable=maybe-no-member
+        #     _create_kpi_request.slice_id.slice_uuid.uuid = "SLIUPM"  # pylint: disable=maybe-no-member
+
+        #     self.kpi_test = self.monitoring_client.SetKpi(_create_kpi_request)
+        #     LOGGER.debug("KPI Test: {}".format(self.kpi_test))
+
+        #     _include_kpi_request = Kpi()
+        #     _include_kpi_request.kpi_id.kpi_id.uuid = self.kpi_test.kpi_id.uuid
+        #     _include_kpi_request.timestamp.timestamp = timestamp_utcnow_to_float()
+        #     _include_kpi_request.kpi_value.floatVal = 500
+
+        #     self.monitoring_client.IncludeKpi(_include_kpi_request)

         # Only notify Attack Mitigator when a cryptomining connection has been detected
         if cryptomining_detector_output["tag_name"] == "Crypto":
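
Reviewer note (not part of the patch): the new monitor_kpis_test() deduplicates attack connections, compromised clients and attackers with nested loops and boolean flags. The sketch below shows the same per-interval counting expressed with sets, which is easier to check against the KPI definitions. The helper name aggregate_interval and the simplified records (service-id filtering omitted) are hypothetical illustrations, not code from the repository.

# Hypothetical sketch: per-interval deduplication with sets, assuming records shaped
# like the dicts stored in self.inference_results ("timestamp" plus "output").
from datetime import datetime, timedelta

CRYPTO_CLASS = 1  # same meaning as self.CRYPTO_CLASS in the service


def aggregate_interval(results, start, end):
    """Count unique attack connections, compromised clients and attackers in [start, end)."""
    attack_conns = set()         # (ip_o, port_o, ip_d, port_d) tuples
    compromised_clients = set()  # origin IPs
    attackers = set()            # destination IPs

    for record in results:
        if not (start <= record["timestamp"] < end):
            continue
        output = record["output"]
        if output["tag"] != CRYPTO_CLASS:
            continue
        attack_conns.add((output["ip_o"], output["port_o"], output["ip_d"], output["port_d"]))
        compromised_clients.add(output["ip_o"])
        attackers.add(output["ip_d"])

    return len(attack_conns), len(compromised_clients), len(attackers)


if __name__ == "__main__":
    now = datetime.utcnow()
    crypto = {"tag": CRYPTO_CLASS, "ip_o": "10.0.0.1", "port_o": 4444, "ip_d": "203.0.113.7", "port_d": 3333}
    sample = [
        {"timestamp": now, "output": dict(crypto)},
        {"timestamp": now, "output": dict(crypto)},  # duplicate connection, counted once
    ]
    print(aggregate_interval(sample, now - timedelta(seconds=5), now + timedelta(seconds=5)))  # (1, 1, 1)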