diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
index b71dd9a35329e2aef6ce64739f59103a656b4de3..9ae3cdc1216891ca4dfcf01c1bd49d27bf4ef6f6 100644
--- a/src/load_generator/load_gen/Constants.py
+++ b/src/load_generator/load_gen/Constants.py
@@ -26,3 +26,5 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT': 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT',
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT'  : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
+
+MAX_WORKER_THREADS = 10
\ No newline at end of file
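
The new `MAX_WORKER_THREADS` constant replaces the magic number 10 that was hard-coded in the scheduler (see the RequestScheduler.py hunk below), so the load generator's concurrency cap lives in one place. A minimal usage sketch, with the constant inlined as a stand-in since the real import path only resolves inside the repo; the stdlib pool here is for illustration only, the scheduler itself uses APScheduler's executor:

```python
from concurrent.futures import ThreadPoolExecutor

# Stand-in for: from load_generator.load_gen.Constants import MAX_WORKER_THREADS
MAX_WORKER_THREADS = 10

# Any component that needs bounded concurrency reads the shared constant
# instead of hard-coding its own pool size.
with ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS) as pool:
    squares = list(pool.map(lambda n: n * n, range(20)))
```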
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index a6d14307eee9bbc531e09495d4b650e361aa3d26..0ada285bc88e7d6a2405c3639f31778824d84855 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -230,9 +230,9 @@ class RequestGenerator:
         ]
 
         if request_type == RequestType.SERVICE_L2NM:
-            availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-            capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-            e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+            availability   = round(random.uniform(0.0,  99.99), ndigits=2)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -275,9 +275,9 @@ class RequestGenerator:
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
         elif request_type == RequestType.SERVICE_L3NM:
-            availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-            capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-            e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+            availability   = round(random.uniform(0.0,  99.99), ndigits=2)
+            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -380,9 +380,9 @@ class RequestGenerator:
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
 
-        availability  = int(random.uniform(00.0, 99.99) * 100.0) / 100.0
-        capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0
-        e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0
+        availability   = round(random.uniform(0.0,  99.99), ndigits=2)
+        capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
+        e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
         constraints = [
             json_constraint_sla_availability(1, True, availability),
             json_constraint_sla_capacity(capacity_gbps),
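
Note the semantic change repeated in all three hunks above: the old `int(x * 100.0) / 100.0` idiom truncates toward zero, whereas `round(x, ndigits=2)` rounds to the nearest hundredth. A quick illustration of the difference (the literal 37.468 is arbitrary):

```python
def truncate_2dp(value: float) -> float:
    # Old pattern: scale up, truncate toward zero, scale back down.
    return int(value * 100.0) / 100.0

value = 37.468
print(truncate_2dp(value))      # 37.46 - digits past the hundredths are dropped
print(round(value, ndigits=2))  # 37.47 - rounded to the nearest hundredth
```

For exact ties, `round()` uses round-half-to-even, so the two idioms can also disagree by one unit in the last place; for uniformly sampled SLA values the practical effect is negligible either way.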
diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
index 57afe80bec569b29d2931256a8c1cf7a1ab3eb85..773a37eac258f8b3c16c966464ced124d3c77c85 100644
--- a/src/load_generator/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -21,6 +21,7 @@ from typing import Dict, Optional
 from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
+from .Constants import MAX_WORKER_THREADS
 from .DltTools import explore_entities_to_record, record_entities
 from .Parameters import Parameters
 from .RequestGenerator import RequestGenerator
@@ -37,7 +38,7 @@ class RequestScheduler:
         self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=10)},
+            executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
             job_defaults = {
                 'coalesce': False,
                 'max_instances': 100,
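
With the import in place, the executor's pool size now comes from the shared constant. A self-contained sketch of the resulting APScheduler configuration, assuming `BackgroundScheduler` as the scheduler class (the real code instantiates whichever `scheduler_class` was passed in) and inlining the constant:

```python
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler

MAX_WORKER_THREADS = 10  # stand-in for: from .Constants import MAX_WORKER_THREADS

scheduler = BackgroundScheduler()
scheduler.configure(
    jobstores = {'default': MemoryJobStore()},  # jobs kept in memory only
    executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
    job_defaults = {
        'coalesce'     : False,  # run each missed execution instead of merging them
        'max_instances': 100,    # allow many concurrent instances of the same job
    },
)
```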