Commit ac50ef03 authored by Carlos Natalino Da Silva

Improving code: define the dropped-assessments counter in the manager and pass it to the workers, compact the loop-time histogram buckets, and refine the Grafana energy-consumption dashboard.

parent 68a67051
Part of 2 merge requests: !142 Release TeraFlowSDN 2.1 and !97 Optical cybersecurity scenario
@@ -83,31 +83,12 @@ LOOP_TIME = Histogram(
     "tfs_opticalattackmanager_loop_seconds",
     "Time taken by each security loop",
     buckets=(
-        1.0,
-        2.5,
-        5.0,
-        7.5,
-        10.0,
-        12.5,
-        15.0,
-        17.5,
-        20.0,
-        22.5,
-        25.0,
-        27.5,
-        30.0,
-        32.5,
-        35.0,
-        37.5,
-        40.0,
-        42.5,
-        45.0,
-        47.5,
-        50.0,
-        52.5,
-        55.0,
-        57.5,
-        60.0,
+        1.0, 2.5, 5.0, 7.5, 10.0, 12.5,
+        15.0, 17.5, 20.0, 22.5, 25.0, 27.5,
+        30.0, 32.5, 35.0, 37.5, 40.0,
+        42.5, 45.0, 47.5,
+        50.0, 52.5, 55.0, 57.5,
+        60.0, 70.0, 80.0, 90.0, 100.0,
         float("inf"),
     ),
 )
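
The compacted bucket tuple keeps the 2.5 s resolution up to 60 s and adds coarser 70-100 s buckets before +Inf. For reference, a prometheus_client Histogram like LOOP_TIME is typically fed once per loop iteration with its time() helper (or an explicit observe()); a minimal sketch, using an illustrative metric name and a placeholder loop body:

import time

from prometheus_client import Histogram

# Illustrative histogram; the real LOOP_TIME uses the buckets shown in the hunk above.
EXAMPLE_LOOP_TIME = Histogram(
    "example_loop_seconds",
    "Time taken by each iteration of a periodic loop",
    buckets=(1.0, 2.5, 5.0, 10.0, 30.0, 60.0, float("inf")),
)

def one_iteration() -> None:
    # time() measures the with-block and records the elapsed seconds into the buckets
    with EXAMPLE_LOOP_TIME.time():
        time.sleep(0.1)  # placeholder for the real security-assessment loop body
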
@@ -127,6 +108,11 @@ DESIRED_MONITORING_INTERVAL = Gauge(
     "Desired loop monitoring interval",
 )
 
+DROP_COUNTER = Counter(
+    "tfs_opticalattackmanager_dropped_assessments",
+    "Dropped assessments due to detector timeout",
+)
+
 global service_list
 global cache
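
This hunk, together with the ones below, moves the dropped-assessments counter out of the helper module: it is defined once here in the manager and handed to the workers as an argument, so the tfs_opticalattackmanager_dropped_assessments metric is created in a single place. A minimal sketch of that pattern, with illustrative names:

from prometheus_client import Counter

# Defined once, at module level, in the owning service (illustrative name).
EXAMPLE_DROP_COUNTER = Counter(
    "example_dropped_assessments",
    "Dropped assessments due to detector timeout",
)

def worker(drop_counter: Counter) -> None:
    # Workers receive the counter as an argument and only increment it;
    # they never create a metric of their own.
    drop_counter.inc()

worker(EXAMPLE_DROP_COUNTER)
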
@@ -390,6 +376,7 @@ async def monitor_services(terminate, service_list=None, cache=None):
                 (i + 1) * k + min(i + 1, m),  # last index
                 host,
                 port,
+                DROP_COUNTER,
                 desired_monitoring_interval * 0.9,
             )
             for i in range(cur_number_workers)
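
The index expressions above ((i + 1) * k + min(i + 1, m) as the last index of a worker's slice) follow the usual divmod split of n services across w workers, with the first m workers taking one extra service. A small, self-contained sketch of that arithmetic, assuming k and m come from divmod:

def worker_bounds(n: int, w: int) -> list:
    # k services per worker; the first m workers take one extra
    k, m = divmod(n, w)
    return [(i * k + min(i, m), (i + 1) * k + min(i + 1, m)) for i in range(w)]

# Example: 10 services over 3 workers -> [(0, 4), (4, 7), (7, 10)]
print(worker_bounds(10, 3))
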
@@ -22,13 +22,7 @@ from prometheus_client import Counter
 from common.proto.asyncio.optical_attack_detector_grpc import \
     OpticalAttackDetectorServiceStub
 from common.proto.asyncio.optical_attack_detector_pb2 import DetectionRequest
-from common.Settings import get_log_level, get_setting
-
-DROP_COUNTER = Counter(
-    "tfs_opticalattackmanager_dropped_assessments",
-    "Dropped assessments due to detector timeout",
-)
-
+from common.Settings import get_log_level
 
 log_level = get_log_level()
 logging.basicConfig(level=log_level)
@@ -41,6 +35,7 @@ async def detect_attack(
     context_id: str,
     service_id: str,
     kpi_id: str,
+    drop_counter: Counter,
     timeout: float = 20.0,
 ) -> None:
     try:
@@ -61,7 +56,7 @@
             "Exception while processing service_id {}/{}".format(service_id, kpi_id)
         )
         # LOGGER.exception(e)
-        DROP_COUNTER.inc()
+        drop_counter.inc()
 
 
 def delegate_services(
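
With the two hunks above, detect_attack now increments the counter it is given whenever an assessment is abandoned, instead of a module-level global. A minimal, self-contained sketch of the timeout-and-count pattern, using asyncio.wait_for and a placeholder coroutine in place of the real gRPC DetectAttack call:

import asyncio
import logging

from prometheus_client import Counter

LOGGER = logging.getLogger(__name__)

async def assess_with_timeout(assessment, drop_counter: Counter, timeout: float = 20.0) -> None:
    try:
        # give up on assessments that do not finish within the time budget
        await asyncio.wait_for(assessment, timeout=timeout)
    except Exception:
        LOGGER.warning("assessment dropped after %.1f s", timeout)
        drop_counter.inc()  # exported as the dropped-assessments counter

async def _slow_assessment() -> None:
    await asyncio.sleep(30)  # slower than the budget below, so it gets dropped

DROPS = Counter("example_drops", "Dropped assessments (illustrative)")
asyncio.run(assess_with_timeout(_slow_assessment(), DROPS, timeout=0.1))
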
@@ -70,6 +65,7 @@ def delegate_services(
     end_index: int,
     host: str,
     port: str,
+    drop_counter: Counter,
     monitoring_interval: float,
 ):
     async def run_internal_loop():
@@ -81,6 +77,7 @@
                     service["context"],
                     service["service"],
                     service["kpi"],
+                    drop_counter,
                     # allow at most 90% of the monitoring interval to succeed
                     monitoring_interval * 0.9,
                 )
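
delegate_services runs the assessments for its slice of services inside its own asyncio event loop, giving each one at most 90% of the monitoring interval. A hedged sketch of that fan-out, with the gRPC call and the counter pass-through replaced by a placeholder coroutine:

import asyncio

async def _assess_service(service: dict, timeout: float) -> None:
    # placeholder for detect_attack(...) issuing the DetectionRequest
    await asyncio.sleep(0)

def delegate_services_sketch(service_list, start_index: int, end_index: int, monitoring_interval: float) -> None:
    async def run_internal_loop() -> None:
        tasks = [
            # allow at most 90% of the monitoring interval per assessment
            _assess_service(service, monitoring_interval * 0.9)
            for service in service_list[start_index:end_index]
        ]
        await asyncio.gather(*tasks, return_exceptions=True)

    asyncio.run(run_internal_loop())

# usage sketch: assess the first two of three services
delegate_services_sketch(
    [{"context": "c", "service": s, "kpi": "k"} for s in ("s1", "s2", "s3")],
    0, 2, monitoring_interval=30.0,
)
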
@@ -1380,7 +1380,7 @@
             "axisPlacement": "auto",
             "barAlignment": 0,
             "drawStyle": "line",
-            "fillOpacity": 0,
+            "fillOpacity": 29,
             "gradientMode": "none",
             "hideFrom": {
               "legend": false,
@@ -1397,7 +1397,7 @@
             "spanNulls": false,
             "stacking": {
               "group": "A",
-              "mode": "none"
+              "mode": "normal"
             },
             "thresholdsStyle": {
               "mode": "off"
@@ -1416,7 +1416,8 @@
                 "value": 80
               }
             ]
-          }
+          },
+          "unit": "mwatt"
         },
         "overrides": []
       },
@@ -1445,10 +1446,71 @@
             "uid": "${DS_PROMETHEUS}"
           },
           "editorMode": "code",
-          "expr": "sum(scaph_process_power_consumption_microwatts{namespace=\"tfs\", cmdline=~\".+opticalattackdetector.+\"})/1000000",
-          "legendFormat": "Detector",
+          "exemplar": false,
+          "expr": "sum(scaph_process_power_consumption_microwatts{namespace=\"tfs\", cmdline=~\".+opticalattackmanager.+\"})/1000",
+          "instant": false,
+          "legendFormat": "Manager",
+          "range": true,
           "refId": "A"
         },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "exemplar": false,
+          "expr": "sum(scaph_process_power_consumption_microwatts{namespace=\"tfs\", cmdline=~\".+opticalattackdetector.+\"})/1000",
+          "hide": false,
+          "instant": false,
+          "legendFormat": "Detector",
+          "range": true,
+          "refId": "B"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "exemplar": false,
+          "expr": "sum(scaph_process_power_consumption_microwatts{namespace=\"tfs\", cmdline=~\".+dbscan.+\"})/1000",
+          "hide": false,
+          "instant": false,
+          "interval": "",
+          "legendFormat": "UL Inference",
+          "range": true,
+          "refId": "C"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "exemplar": false,
+          "expr": "sum(scaph_process_power_consumption_microwatts{namespace=\"tfs\", cmdline=~\"redis-server.+\"})/1000",
+          "hide": false,
+          "instant": false,
+          "interval": "",
+          "legendFormat": "Cache",
+          "range": true,
+          "refId": "D"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "exemplar": false,
+          "expr": "sum(scaph_process_power_consumption_microwatts{namespace=\"tfs\", cmdline=~\".+opticalattackmitigator.+\"})/1000",
+          "hide": false,
+          "instant": false,
+          "interval": "",
+          "legendFormat": "Mitigator",
+          "range": true,
+          "refId": "E"
+        }
       ],
       "title": "Energy consumption",
@@ -1463,7 +1525,7 @@
     "list": []
   },
   "time": {
-    "from": "now-15m",
+    "from": "now-5m",
     "to": "now"
   },
   "timepicker": {},
@@ -32,7 +32,7 @@ v1 = client.CoreV1Api()
 caching_pod = None
 pods = v1.list_namespaced_pod(namespace=namespace)
 for pod in pods.items:
-    print(pod.metadata)
+    # print(pod.metadata)
     if "app" in pod.metadata.labels and "caching" in pod.metadata.labels["app"]:
         caching_pod = pod
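
The loop above filters pods client-side by inspecting each pod's app label. If the label value is exactly "caching", the same selection could be pushed to the API server with the Kubernetes client's label_selector argument, for example:

pods = v1.list_namespaced_pod(namespace=namespace, label_selector="app=caching")

This is only a sketch: the original check is a substring match, so the client-side loop stays more permissive than an exact-match selector.
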