From dd6cc334f8b63898052a864dfa43861e7470848d Mon Sep 17 00:00:00 2001
From: gifrerenom <lluis.gifre@cttc.es>
Date: Mon, 20 Feb 2023 17:46:51 +0000
Subject: [PATCH] Load Generator component:

- Extracted constant MAX_WORKER_THREADS
- Rounded SLA values to 2 decimal digits
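
  Note (illustration only, not part of the patched code): the replaced idiom
  int(x * 100.0) / 100.0 truncates the random value towards zero, whereas
  round(x, ndigits=2) returns the nearest 2-decimal value. A minimal sketch of
  the difference:

      import random

      value = random.uniform(0.1, 100.0)       # e.g. 12.3456
      truncated = int(value * 100.0) / 100.0   # 12.34: int() drops the third decimal
      rounded   = round(value, ndigits=2)      # 12.35 when the third decimal is >= 5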
---
 src/load_generator/load_gen/Constants.py       |  2 ++
 .../load_gen/RequestGenerator.py               | 18 +++++++++---------
 .../load_gen/RequestScheduler.py               |  3 ++-
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
index b71dd9a35..9ae3cdc12 100644
--- a/src/load_generator/load_gen/Constants.py
+++ b/src/load_generator/load_gen/Constants.py
@@ -26,3 +26,5 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT': 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT',
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT'  : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
+
+MAX_WORKER_THREADS = 10
\ No newline at end of file
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index a6d14307e..0ada285bc 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -230,9 +230,9 @@ class RequestGenerator:
         ]
 
         if request_type == RequestType.SERVICE_L2NM:
-            availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-            capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-            e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+            availability   = round(random.uniform(0.0,  99.99), ndigits=2)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -275,9 +275,9 @@ class RequestGenerator:
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
         elif request_type == RequestType.SERVICE_L3NM:
-            availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-            capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-            e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+            availability   = round(random.uniform(0.0,  99.99), ndigits=2)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -380,9 +380,9 @@ class RequestGenerator:
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
 
-        availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-        capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-        e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+        availability   = round(random.uniform(0.0,  99.99), ndigits=2)
+        capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+        e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
         constraints = [
             json_constraint_sla_availability(1, True, availability),
             json_constraint_sla_capacity(capacity_gbps),
diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
index 57afe80be..773a37eac 100644
--- a/src/load_generator/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -21,6 +21,7 @@ from typing import Dict, Optional
 from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
+from .Constants import MAX_WORKER_THREADS
 from .DltTools import explore_entities_to_record, record_entities
 from .Parameters import Parameters
 from .RequestGenerator import RequestGenerator
@@ -37,7 +38,7 @@ class RequestScheduler:
         self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=10)},
+            executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
             job_defaults = {
                 'coalesce': False,
                 'max_instances': 100,
-- 
GitLab