diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 22268ba5a6decc59dc0a9bd532e3b79a9e5e64d6..399d3a248414955d2bfbd1a67a4082cadd0c22d6 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -495,5 +495,14 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
     curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
     echo
 
+    # Dashboard: Load Generator Status
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_load_generator.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-loadgen-stats"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
     printf "\n\n"
 fi
diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml
index 3f65c2c857a39f2b7a5ebeaccd9ddfd4916f2487..7cc6f19122573a612ddca774c3a785bff93f8b38 100644
--- a/manifests/load_generatorservice.yaml
+++ b/manifests/load_generatorservice.yaml
@@ -33,6 +33,7 @@ spec:
         imagePullPolicy: Always
         ports:
         - containerPort: 50052
+        - containerPort: 9192
         env:
         - name: LOG_LEVEL
           value: "INFO"
@@ -65,3 +66,7 @@ spec:
     protocol: TCP
     port: 50052
     targetPort: 50052
+  - name: metrics
+    protocol: TCP
+    port: 9192
+    targetPort: 9192
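+  # port 9192 is scraped by Prometheus through the ServiceMonitor added in
+  # manifests/servicemonitors.yaml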
diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml
index f5da08182a4665b21607987ea97d9bf3cc5b7e21..ec929f757cdf5468a7db7a7c1f1e755611d5327b 100644
--- a/manifests/servicemonitors.yaml
+++ b/manifests/servicemonitors.yaml
@@ -330,3 +330,32 @@ spec:
     any: false
     matchNames:
     - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-load-generatorservice-metric
+  labels:
+    app: load-generatorservice
+    #release: prometheus
+    #release: prom  # name of the Prometheus release
+    # (IMPORTANT: check the correct release name in the ServiceMonitor of
+    #  Prometheus itself; without it, Prometheus will not recognize the
+    #  app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: load-generatorservice # must match the labels of the target Service
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
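+# NOTE: the target Service must expose a port named 'metrics' (9192 here); see
+# manifests/load_generatorservice.yaml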
diff --git a/proto/load_generator.proto b/proto/load_generator.proto
index 86f9469588f1586da5339edad198e39e82598cde..395afbb883d4c014432a056ada5ca5bb467738a3 100644
--- a/proto/load_generator.proto
+++ b/proto/load_generator.proto
@@ -33,16 +33,32 @@ enum RequestTypeEnum {
   REQUESTTYPE_SLICE_L3NM   = 6;
 }
 
+message Range {
+  float minimum = 1;
+  float maximum = 2;
+}
+
+message ScalarOrRange {
+  oneof value {
+    float scalar = 1; // select the scalar value
+    Range range = 2;  // select a random uniformly distributed value between minimum and maximum
+  }
+}
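+// Example (illustrative): { scalar: 99.9 } always selects 99.9, while
+// { range: { minimum: 95.0, maximum: 99.99 } } draws uniformly from [95.0, 99.99].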
+
 message Parameters {
   uint64 num_requests = 1;  // if == 0, generate infinite requests
   repeated RequestTypeEnum request_types = 2;
   float offered_load = 3;
   float holding_time = 4;
   float inter_arrival_time = 5;
-  bool do_teardown = 6;
-  bool dry_mode = 7;
-  bool record_to_dlt = 8;
-  string dlt_domain_id = 9;
+  repeated ScalarOrRange availability = 6;    // one from the list is selected
+  repeated ScalarOrRange capacity_gbps = 7;   // one from the list is selected
+  repeated ScalarOrRange e2e_latency_ms = 8;  // one from the list is selected
+  uint32 max_workers = 9;
+  bool do_teardown = 10;
+  bool dry_mode = 11;
+  bool record_to_dlt = 12;
+  string dlt_domain_id = 13;
 }
 
 message Status {
diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py
index 77d9a637c88aa76709d5ddee5326c0a3fad917fc..b241d3b62821c0bfe319546cbeadce79fce59db9 100644
--- a/src/common/method_wrappers/Decorator.py
+++ b/src/common/method_wrappers/Decorator.py
@@ -15,7 +15,7 @@
 import grpc, json, logging, threading
 from enum import Enum
 from prettytable import PrettyTable
-from typing import Any, Dict, List, Set, Tuple
+from typing import Any, Dict, List, Optional, Set, Tuple
 from prometheus_client import Counter, Histogram
 from prometheus_client.metrics import MetricWrapperBase, INF
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -25,12 +25,14 @@ class MetricTypeEnum(Enum):
     COUNTER_STARTED    = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_started'
     COUNTER_COMPLETED  = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_completed'
     COUNTER_FAILED     = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_failed'
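+    # BLOCKED counts requests that could not be composed at all (e.g., no free
+    # endpoints were available); incremented by the load generator scheduler.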
+    COUNTER_BLOCKED    = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_blocked'
     HISTOGRAM_DURATION = 'tfs_{component:s}_{sub_module:s}_{method:s}_histogram_duration'
 
 METRIC_TO_CLASS_PARAMS = {
     MetricTypeEnum.COUNTER_STARTED   : (Counter,   {}),
     MetricTypeEnum.COUNTER_COMPLETED : (Counter,   {}),
     MetricTypeEnum.COUNTER_FAILED    : (Counter,   {}),
+    MetricTypeEnum.COUNTER_BLOCKED   : (Counter,   {}),
     MetricTypeEnum.HISTOGRAM_DURATION: (Histogram, {
         'buckets': (
             # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
@@ -75,21 +77,45 @@ class MetricsPool:
             return MetricsPool.metrics[metric_name]
 
     def get_metrics(
-        self, method : str
+        self, method : str, labels : Optional[Dict[str, str]] = None
     ) -> Tuple[Histogram, Counter, Counter, Counter]:
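+        # explicitly passed 'labels' take precedence over the pool-level defaults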
         histogram_duration : Histogram = self.get_or_create(method, MetricTypeEnum.HISTOGRAM_DURATION)
         counter_started    : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_STARTED)
         counter_completed  : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_COMPLETED)
         counter_failed     : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_FAILED)
 
-        if len(self._labels) > 0:
-            histogram_duration = histogram_duration.labels(**(self._labels))
-            counter_started    = counter_started.labels(**(self._labels))
-            counter_completed  = counter_completed.labels(**(self._labels))
-            counter_failed     = counter_failed.labels(**(self._labels))
+        if labels is None and len(self._labels) > 0:
+            labels = self._labels
+
+        if labels is not None and len(labels) > 0:
+            histogram_duration = histogram_duration.labels(**labels)
+            counter_started    = counter_started.labels(**labels)
+            counter_completed  = counter_completed.labels(**labels)
+            counter_failed     = counter_failed.labels(**labels)
 
         return histogram_duration, counter_started, counter_completed, counter_failed
 
+    def get_metrics_loadgen(
+        self, method : str, labels : Optional[Dict[str, str]] = None
+    ) -> Tuple[Histogram, Counter, Counter, Counter, Counter]:
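+        # Same as get_metrics() but also returns the 'blocked' counter.
+        # Illustrative usage (names are examples):
+        #   hist, started, completed, failed, blocked = \
+        #       pool.get_metrics_loadgen('setup', labels={'request_type': rt})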
+        histogram_duration : Histogram = self.get_or_create(method, MetricTypeEnum.HISTOGRAM_DURATION)
+        counter_started    : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_STARTED)
+        counter_completed  : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_COMPLETED)
+        counter_failed     : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_FAILED)
+        counter_blocked    : Counter   = self.get_or_create(method, MetricTypeEnum.COUNTER_BLOCKED)
+
+        if labels is None and len(self._labels) > 0:
+            labels = self._labels
+
+        if labels is not None and len(labels) > 0:
+            histogram_duration = histogram_duration.labels(**labels)
+            counter_started    = counter_started.labels(**labels)
+            counter_completed  = counter_completed.labels(**labels)
+            counter_failed     = counter_failed.labels(**labels)
+            counter_blocked    = counter_blocked.labels(**labels)
+
+        return histogram_duration, counter_started, counter_completed, counter_failed, counter_blocked
+
     def get_pretty_table(self, remove_empty_buckets : bool = True) -> PrettyTable:
         with MetricsPool.lock:
             method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]] = dict()
diff --git a/src/load_generator/command/__main__.py b/src/load_generator/command/__main__.py
index 7504eb6da6d6adea698249240abf2c4e4559297a..a97f081a32269ff824733b9a2a69be21bfb2004f 100644
--- a/src/load_generator/command/__main__.py
+++ b/src/load_generator/command/__main__.py
@@ -36,6 +36,10 @@ def main():
         ],
         offered_load  = 50,
         holding_time  = 10,
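+        # each *_ranges entry is a scalar or a [min, max] pair; one entry is
+        # chosen at random per request (see load_generator/tools/ListScalarRange.py)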
+        availability_ranges   = [[0.0, 99.9999]],
+        capacity_gbps_ranges  = [[0.1, 100.00]],
+        e2e_latency_ms_ranges = [[5.0, 100.00]],
+        max_workers   = 10,
         dry_mode      = False,           # in dry mode, no request is sent to TeraFlowSDN
         record_to_dlt = False,           # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT
         dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False
diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
index 9ae3cdc1216891ca4dfcf01c1bd49d27bf4ef6f6..09cdecab124a776d3f71f66554db0934eaf1bb1c 100644
--- a/src/load_generator/load_gen/Constants.py
+++ b/src/load_generator/load_gen/Constants.py
@@ -27,4 +27,8 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT'  : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
 
-MAX_WORKER_THREADS = 10
\ No newline at end of file
+DEFAULT_AVAILABILITY_RANGES   = [[0.0, 99.9999]]
+DEFAULT_CAPACITY_GBPS_RANGES  = [[0.1, 100.00]]
+DEFAULT_E2E_LATENCY_MS_RANGES = [[5.0, 100.00]]
+
+DEFAULT_MAX_WORKERS = 10
diff --git a/src/load_generator/load_gen/Parameters.py b/src/load_generator/load_gen/Parameters.py
index f0de3ea1aa268c520fd214f7f621953289ac5bc9..aca40cd3854fad203f15ce9b07a79715e9ea46f6 100644
--- a/src/load_generator/load_gen/Parameters.py
+++ b/src/load_generator/load_gen/Parameters.py
@@ -13,18 +13,30 @@
 # limitations under the License.
 
 from typing import List, Optional
+from load_generator.load_gen.Constants import (
+    DEFAULT_AVAILABILITY_RANGES, DEFAULT_CAPACITY_GBPS_RANGES, DEFAULT_E2E_LATENCY_MS_RANGES, DEFAULT_MAX_WORKERS)
+from load_generator.tools.ListScalarRange import Type_ListScalarRange
+
 
 class Parameters:
     def __init__(
         self, num_requests : int, request_types : List[str], offered_load : Optional[float] = None,
-        inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None, do_teardown : bool = True,
-        dry_mode : bool = False, record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None
+        inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None,
+        availability_ranges : Type_ListScalarRange = DEFAULT_AVAILABILITY_RANGES,
+        capacity_gbps_ranges : Type_ListScalarRange = DEFAULT_CAPACITY_GBPS_RANGES,
+        e2e_latency_ms_ranges : Type_ListScalarRange = DEFAULT_E2E_LATENCY_MS_RANGES,
+        max_workers : int = DEFAULT_MAX_WORKERS, do_teardown : bool = True, dry_mode : bool = False,
+        record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None
     ) -> None:
         self._num_requests = num_requests
         self._request_types = request_types
         self._offered_load = offered_load
         self._inter_arrival_time = inter_arrival_time
         self._holding_time = holding_time
+        self._availability_ranges = availability_ranges
+        self._capacity_gbps_ranges = capacity_gbps_ranges
+        self._e2e_latency_ms_ranges = e2e_latency_ms_ranges
+        self._max_workers = max_workers
         self._do_teardown = do_teardown
         self._dry_mode = dry_mode
         self._record_to_dlt = record_to_dlt
@@ -59,6 +71,18 @@ class Parameters:
     @property
     def holding_time(self): return self._holding_time
 
+    @property
+    def availability_ranges(self): return self._availability_ranges
+
+    @property
+    def capacity_gbps_ranges(self): return self._capacity_gbps_ranges
+
+    @property
+    def e2e_latency_ms_ranges(self): return self._e2e_latency_ms_ranges
+
+    @property
+    def max_workers(self): return self._max_workers
+
     @property
     def do_teardown(self): return self._do_teardown
 
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
index cf56e221db0fbbe3f080d01af45cd36fc4ef56a0..ab8f7e30e7e9de61bcfa7f5c52b7a09deb00ba2a 100644
--- a/src/load_generator/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -28,6 +28,7 @@ from common.tools.object_factory.Slice import json_slice
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from dlt.connector.client.DltConnectorClient import DltConnectorClient
+from load_generator.tools.ListScalarRange import generate_value
 from .Constants import ENDPOINT_COMPATIBILITY, RequestType
 from .DltTools import record_device_to_dlt, record_link_to_dlt
 from .Parameters import Parameters
@@ -186,11 +187,11 @@ class RequestGenerator:
             self._used_device_endpoints.setdefault(device_uuid, dict()).pop(endpoint_uuid, None)
             self._available_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid)
 
-    def compose_request(self) -> Tuple[bool, Optional[Dict]]: # completed, request
+    def compose_request(self) -> Tuple[bool, Optional[Dict], Optional[str]]: # completed, request, request_type
         with self._lock:
             if not self.infinite_loop and (self._num_generated >= self._parameters.num_requests):
                 LOGGER.info('Generation Done!')
-                return True, None # completed
+                return True, None, None # completed
             self._num_generated += 1
             num_request = self._num_generated
 
@@ -203,9 +204,9 @@ class RequestGenerator:
         if request_type in {
             RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, RequestType.SERVICE_TAPI, RequestType.SERVICE_MW
         }:
-            return False, self._compose_service(num_request, request_uuid, request_type)
+            return False, self._compose_service(num_request, request_uuid, request_type), request_type
         elif request_type in {RequestType.SLICE_L2NM, RequestType.SLICE_L3NM}:
-            return False, self._compose_slice(num_request, request_uuid, request_type)
+            return False, self._compose_slice(num_request, request_uuid, request_type), request_type
 
     def _compose_service(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
         # choose source endpoint
@@ -244,9 +245,9 @@ class RequestGenerator:
         ]
 
         if request_type == RequestType.SERVICE_L2NM:
-            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
-            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
-            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+            availability   = generate_value(self._parameters.availability_ranges,   ndigits=5)
+            capacity_gbps  = generate_value(self._parameters.capacity_gbps_ranges,  ndigits=2)
+            e2e_latency_ms = generate_value(self._parameters.e2e_latency_ms_ranges, ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -293,9 +294,9 @@ class RequestGenerator:
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
         elif request_type == RequestType.SERVICE_L3NM:
-            availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
-            capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
-            e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+            availability   = generate_value(self._parameters.availability_ranges,   ndigits=5)
+            capacity_gbps  = generate_value(self._parameters.capacity_gbps_ranges,  ndigits=2)
+            e2e_latency_ms = generate_value(self._parameters.e2e_latency_ms_ranges, ndigits=2)
 
             constraints = [
                 json_constraint_sla_availability(1, True, availability),
@@ -410,9 +411,10 @@ class RequestGenerator:
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
 
-        availability   = round(random.uniform(0.0, 99.9999), ndigits=5)
-        capacity_gbps  = round(random.uniform(0.1, 100.00), ndigits=2)
-        e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2)
+        availability   = generate_value(self._parameters.availability_ranges,   ndigits=5)
+        capacity_gbps  = generate_value(self._parameters.capacity_gbps_ranges,  ndigits=2)
+        e2e_latency_ms = generate_value(self._parameters.e2e_latency_ms_ranges, ndigits=2)
+
         constraints = [
             json_constraint_sla_availability(1, True, availability),
             json_constraint_sla_capacity(capacity_gbps),
diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
index 773a37eac258f8b3c16c966464ced124d3c77c85..08876e29fa7da3f50ba78553a6aee81f0add7b56 100644
--- a/src/load_generator/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -18,10 +18,11 @@ from apscheduler.jobstores.memory import MemoryJobStore
 from apscheduler.schedulers.blocking import BlockingScheduler
 from datetime import datetime, timedelta
 from typing import Dict, Optional
+from common.method_wrappers.Decorator import MetricsPool
 from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
+from common.tools.grpc.Tools import grpc_message_to_json_string
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
-from .Constants import MAX_WORKER_THREADS
 from .DltTools import explore_entities_to_record, record_entities
 from .Parameters import Parameters
 from .RequestGenerator import RequestGenerator
@@ -31,6 +32,10 @@ logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
 
 LOGGER = logging.getLogger(__name__)
 
+METRICS_POOL = MetricsPool('LoadGen', 'Requests', labels={
+    'request_type': ''
+})
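+# 'request_type' is declared as a Prometheus label so the setup/teardown metrics
+# can be split per request type (one time series per type) in Grafana.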
+
 class RequestScheduler:
     def __init__(
         self, parameters : Parameters, generator : RequestGenerator, scheduler_class=BlockingScheduler
@@ -38,7 +43,7 @@ class RequestScheduler:
         self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)},
+            executors = {'default': ThreadPoolExecutor(max_workers=parameters.max_workers)},
             job_defaults = {
                 'coalesce': False,
                 'max_instances': 100,
@@ -64,11 +69,12 @@ class RequestScheduler:
         self._scheduler.add_job(
             self._request_setup, trigger='date', run_date=run_date, timezone=pytz.utc)
 
-    def _schedule_request_teardown(self, request : Dict) -> None:
+    def _schedule_request_teardown(self, request : Dict, request_type : str) -> None:
         ht  = random.expovariate(1.0 / self._parameters.holding_time)
         run_date = datetime.utcnow() + timedelta(seconds=ht)
+        args = (request, request_type)
         self._scheduler.add_job(
-            self._request_teardown, args=(request,), trigger='date', run_date=run_date, timezone=pytz.utc)
+            self._request_teardown, args=args, trigger='date', run_date=run_date, timezone=pytz.utc)
 
     def start(self):
         self._running.set()
@@ -80,7 +86,7 @@ class RequestScheduler:
         self._running.clear()
 
     def _request_setup(self) -> None:
-        completed,request = self._generator.compose_request()
+        completed, request, request_type = self._generator.compose_request()
         if completed:
             LOGGER.info('Generation Done!')
             #self._scheduler.shutdown()
@@ -91,6 +97,9 @@ class RequestScheduler:
 
         if request is None:
             LOGGER.warning('No resources available to compose new request')
+            metrics = METRICS_POOL.get_metrics_loadgen('setup', labels={'request_type': request_type})
+            _, _, _, _, counter_blocked = metrics
+            counter_blocked.inc()
             return
 
         if 'service_id' in request:
@@ -101,7 +110,7 @@ class RequestScheduler:
             dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Setup Service: uuid=%s src=%s:%s dst=%s:%s',
                 service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._create_update(service=request)
+            self._create_update(request_type, service=request)
 
         elif 'slice_id' in request:
             slice_uuid = request['slice_id']['slice_uuid']['uuid']
@@ -111,12 +120,12 @@ class RequestScheduler:
             dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Setup Slice: uuid=%s src=%s:%s dst=%s:%s',
                 slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._create_update(slice_=request)
+            self._create_update(request_type, slice_=request)
 
         if self._parameters.do_teardown:
-            self._schedule_request_teardown(request)
+            self._schedule_request_teardown(request, request_type)
 
-    def _request_teardown(self, request : Dict) -> None:
+    def _request_teardown(self, request : Dict, request_type : str) -> None:
         if 'service_id' in request:
             service_uuid = request['service_id']['service_uuid']['uuid']
             src_device_uuid = request['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
@@ -125,7 +134,7 @@ class RequestScheduler:
             dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Teardown Service: uuid=%s src=%s:%s dst=%s:%s',
                 service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._delete(service_id=ServiceId(**(request['service_id'])))
+            self._delete(request_type, service_id=ServiceId(**(request['service_id'])))
 
         elif 'slice_id' in request:
             slice_uuid = request['slice_id']['slice_uuid']['uuid']
@@ -135,33 +144,64 @@ class RequestScheduler:
             dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid']
             LOGGER.info('Teardown Slice: uuid=%s src=%s:%s dst=%s:%s',
                 slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-            self._delete(slice_id=SliceId(**(request['slice_id'])))
+            self._delete(request_type, slice_id=SliceId(**(request['slice_id'])))
 
         self._generator.release_request(request)
 
-    def _create_update(self, service : Optional[Dict] = None, slice_ : Optional[Dict] = None) -> None:
+    def _create_update(
+        self, request_type : str, service : Optional[Dict] = None, slice_ : Optional[Dict] = None
+    ) -> None:
         if self._parameters.dry_mode: return
 
+        metrics = METRICS_POOL.get_metrics_loadgen('setup', labels={'request_type': request_type})
+        histogram_duration, counter_started, counter_completed, counter_failed, _ = metrics
+
         service_id = None
         if service is not None:
+            service_client = ServiceClient()
+
             service_add = copy.deepcopy(service)
             service_add['service_endpoint_ids'] = []
             service_add['service_constraints'] = []
             service_add['service_config'] = {'config_rules': []}
+            service_add = Service(**service_add)
+            service = Service(**service)
+
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    service_id = service_client.CreateService(service_add)
+                    service_id = service_client.UpdateService(service)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Setting Up Service {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(service)))
 
-            service_client = ServiceClient()
-            service_id = service_client.CreateService(Service(**service_add))
             service_client.close()
 
         slice_id = None
         if slice_ is not None:
+            slice_client = SliceClient()
+
             slice_add = copy.deepcopy(slice_)
             slice_add['slice_endpoint_ids'] = []
             slice_add['slice_constraints'] = []
             slice_add['slice_config'] = {'config_rules': []}
+            slice_add = Slice(**slice_add)
+            slice_ = Slice(**slice_)
+
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    slice_id = slice_client.CreateSlice(slice_add)
+                    slice_id = slice_client.UpdateSlice(slice_)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Setting Up Slice {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(slice_)))
 
-            slice_client = SliceClient()
-            slice_id = slice_client.CreateSlice(Slice(**slice_add))
             slice_client.close()
 
         if self._parameters.record_to_dlt:
@@ -171,41 +211,47 @@ class RequestScheduler:
                 slices_to_record=slices_to_record, services_to_record=services_to_record,
                 devices_to_record=devices_to_record, delete=False)
 
-        service_id = None
-        if service is not None:
-            service_client = ServiceClient()
-            service_id = service_client.UpdateService(Service(**service))
-            service_client.close()
+    def _delete(
+        self, request_type : str, service_id : Optional[ServiceId] = None, slice_id : Optional[SliceId] = None
+    ) -> None:
+        if self._parameters.dry_mode: return
 
-        slice_id = None
-        if slice_ is not None:
-            slice_client = SliceClient()
-            slice_id = slice_client.UpdateSlice(Slice(**slice_))
-            slice_client.close()
+        metrics = METRICS_POOL.get_metrics_loadgen('teardown', labels={'request_type': request_type})
+        histogram_duration, counter_started, counter_completed, counter_failed, _ = metrics
 
         if self._parameters.record_to_dlt:
             entities_to_record = explore_entities_to_record(slice_id=slice_id, service_id=service_id)
             slices_to_record, services_to_record, devices_to_record = entities_to_record
-            record_entities(
-                slices_to_record=slices_to_record, services_to_record=services_to_record,
-                devices_to_record=devices_to_record, delete=False)
 
-    def _delete(self, service_id : Optional[ServiceId] = None, slice_id : Optional[SliceId] = None) -> None:
-        if self._parameters.dry_mode: return
+        if service_id is not None:
+            service_client = ServiceClient()
 
-        if self._parameters.record_to_dlt:
-            entities_to_record = explore_entities_to_record(slice_id=slice_id, service_id=service_id)
-            slices_to_record, services_to_record, devices_to_record = entities_to_record
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    service_client.DeleteService(service_id)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Tearing Down Service {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(service_id)))
+
+            service_client.close()
 
         if slice_id is not None:
             slice_client = SliceClient()
-            slice_client.DeleteSlice(slice_id)
-            slice_client.close()
 
-        if service_id is not None:
-            service_client = ServiceClient()
-            service_client.DeleteService(service_id)
-            service_client.close()
+            with histogram_duration.time():
+                try:
+                    counter_started.inc()
+                    slice_client.DeleteSlice(slice_id)
+                    counter_completed.inc()
+                except: # pylint: disable=bare-except
+                    counter_failed.inc()
+                    MSG = 'Exception Tearing Down Slice {:s}'
+                    LOGGER.exception(MSG.format(grpc_message_to_json_string(slice_id)))
+
+            slice_client.close()
 
         if self._parameters.record_to_dlt:
             record_entities(
diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
index d66b0b2c10c5228e0c3d15759fc46b2c0770154d..41c12d8e461364c5994b9ba0989c8d4241d3e3fe 100644
--- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
+++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
@@ -21,6 +21,7 @@ from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer
 from load_generator.load_gen.Parameters import Parameters as LoadGen_Parameters
 from load_generator.load_gen.RequestGenerator import RequestGenerator
 from load_generator.load_gen.RequestScheduler import RequestScheduler
+from load_generator.tools.ListScalarRange import list_scalar_range__grpc_to_list, list_scalar_range__list_to_grpc
 from .Constants import REQUEST_TYPE_MAP, REQUEST_TYPE_REVERSE_MAP
 
 LOGGER = logging.getLogger(__name__)
@@ -34,15 +35,19 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
 
     def Start(self, request : Parameters, context : grpc.ServicerContext) -> Empty:
         self._parameters = LoadGen_Parameters(
-            num_requests       = request.num_requests,
-            request_types      = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
-            offered_load       = request.offered_load if request.offered_load > 1.e-12 else None,
-            holding_time       = request.holding_time if request.holding_time > 1.e-12 else None,
-            inter_arrival_time = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
-            do_teardown        = request.do_teardown,   # if set, schedule tear down of requests
-            dry_mode           = request.dry_mode,      # in dry mode, no request is sent to TeraFlowSDN
-            record_to_dlt      = request.record_to_dlt, # if set, upload changes to DLT
-            dlt_domain_id      = request.dlt_domain_id, # domain used to uploaded entities (when record_to_dlt = True)
+            num_requests          = request.num_requests,
+            request_types         = [REQUEST_TYPE_MAP[rt] for rt in request.request_types],
+            offered_load          = request.offered_load if request.offered_load > 1.e-12 else None,
+            holding_time          = request.holding_time if request.holding_time > 1.e-12 else None,
+            inter_arrival_time    = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None,
+            availability_ranges   = list_scalar_range__grpc_to_list(request.availability  ),
+            capacity_gbps_ranges  = list_scalar_range__grpc_to_list(request.capacity_gbps ),
+            e2e_latency_ms_ranges = list_scalar_range__grpc_to_list(request.e2e_latency_ms),
+            max_workers           = request.max_workers,
+            do_teardown           = request.do_teardown,   # if set, schedule tear down of requests
+            dry_mode              = request.dry_mode,      # in dry mode, no request is sent to TeraFlowSDN
+            record_to_dlt         = request.record_to_dlt, # if set, upload changes to DLT
+            dlt_domain_id         = request.dlt_domain_id, # domain used to upload entities (when record_to_dlt = True)
         )
 
         LOGGER.info('Initializing Generator...')
@@ -70,15 +75,23 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
         status.num_generated = self._scheduler.num_generated
         status.infinite_loop = self._scheduler.infinite_loop
         status.running       = self._scheduler.running
-        status.parameters.num_requests       = params.num_requests          # pylint: disable=no-member
-        status.parameters.offered_load       = params.offered_load          # pylint: disable=no-member
-        status.parameters.holding_time       = params.holding_time          # pylint: disable=no-member
-        status.parameters.inter_arrival_time = params.inter_arrival_time    # pylint: disable=no-member
-        status.parameters.do_teardown        = params.do_teardown           # pylint: disable=no-member
-        status.parameters.dry_mode           = params.dry_mode              # pylint: disable=no-member
-        status.parameters.record_to_dlt      = params.record_to_dlt         # pylint: disable=no-member
-        status.parameters.dlt_domain_id      = params.dlt_domain_id         # pylint: disable=no-member
-        status.parameters.request_types.extend(request_types)               # pylint: disable=no-member
+
+        stat_pars = status.parameters                               # pylint: disable=no-member
+        stat_pars.num_requests       = params.num_requests          # pylint: disable=no-member
+        stat_pars.offered_load       = params.offered_load          # pylint: disable=no-member
+        stat_pars.holding_time       = params.holding_time          # pylint: disable=no-member
+        stat_pars.inter_arrival_time = params.inter_arrival_time    # pylint: disable=no-member
+        stat_pars.max_workers        = params.max_workers           # pylint: disable=no-member
+        stat_pars.do_teardown        = params.do_teardown           # pylint: disable=no-member
+        stat_pars.dry_mode           = params.dry_mode              # pylint: disable=no-member
+        stat_pars.record_to_dlt      = params.record_to_dlt         # pylint: disable=no-member
+        stat_pars.dlt_domain_id      = params.dlt_domain_id         # pylint: disable=no-member
+        stat_pars.request_types.extend(request_types)               # pylint: disable=no-member
+
+        list_scalar_range__list_to_grpc(params.availability_ranges,   stat_pars.availability  ) # pylint: disable=no-member
+        list_scalar_range__list_to_grpc(params.capacity_gbps_ranges,  stat_pars.capacity_gbps ) # pylint: disable=no-member
+        list_scalar_range__list_to_grpc(params.e2e_latency_ms_ranges, stat_pars.e2e_latency_ms) # pylint: disable=no-member
+
         return status
 
     def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty:
diff --git a/src/load_generator/service/__main__.py b/src/load_generator/service/__main__.py
index 227099c59aa57f420c842a6210f3b8b146b23cda..7051a9a18bb2a86e2ca298b9ddfdc32f3e3fa6e7 100644
--- a/src/load_generator/service/__main__.py
+++ b/src/load_generator/service/__main__.py
@@ -13,14 +13,15 @@
 # limitations under the License.
 
 import logging, signal, sys, threading
+from prometheus_client import start_http_server
 from common.Constants import ServiceNameEnum
 from common.Settings import (
-    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level,
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
     wait_for_environment_variables)
 from .LoadGeneratorService import LoadGeneratorService
 
-log_level = get_log_level()
-logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+LOG_LEVEL = get_log_level()
+logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
 LOGGER = logging.getLogger(__name__)
 
 terminate = threading.Event()
@@ -39,10 +40,13 @@ def main():
         get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
+    LOGGER.info('Starting...')
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
-    LOGGER.info('Starting...')
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
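+    # prometheus_client serves the metrics registry over HTTP; the ServiceMonitor
+    # in manifests/servicemonitors.yaml scrapes it at /metrics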
 
     # Starting load generator service
     grpc_service = LoadGeneratorService()
diff --git a/src/load_generator/tools/ListScalarRange.py b/src/load_generator/tools/ListScalarRange.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a5a5f39940049adee6dfbe35befd815db9256fe
--- /dev/null
+++ b/src/load_generator/tools/ListScalarRange.py
@@ -0,0 +1,99 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+from typing import List, Optional, Tuple, Union
+
+from common.proto.load_generator_pb2 import ScalarOrRange
+
+# RegEx to validate strings formatted as: '1, 2.3, 4.5  .. 6.7 , .8...9, 10., .11'
+# IMPORTANT: this regex just validates data, it does not extract the pieces of data!
+RE_FLOAT = r'[\ ]*[0-9]*[\.]?[0-9]*[\ ]*'
+RE_RANGE = RE_FLOAT + r'(\.\.' + RE_FLOAT + r')?'
+RE_SCALAR_RANGE_LIST  = RE_RANGE + r'(\,' + RE_RANGE + r')*'
+
+Type_ListScalarRange = List[Union[float, Tuple[float, float]]]
+
+def parse_list_scalar_range(value : str) -> Type_ListScalarRange:
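+    # e.g., '0.1, 5..10' -> [[0.1], [5.0, 10.0]]; scalars become single-item lists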
+    str_value = str(value).replace(' ', '')
+    ranges = [[float(value) for value in item.split('..')] for item in str_value.split(',')]
+    return ranges
+
+def list_scalar_range__list_to_grpc(list_scalar_range : Type_ListScalarRange, obj : List[ScalarOrRange]) -> None:
+    for i,scalar_or_range in enumerate(list_scalar_range):
+        if isinstance(scalar_or_range, (float, str)):
+            _scalar = obj.add()
+            _scalar.scalar = float(scalar_or_range)
+        elif isinstance(scalar_or_range, (list, tuple)):
+            if len(scalar_or_range) == 1:
+                _scalar = obj.add()
+                _scalar.scalar = float(scalar_or_range[0])
+            elif len(scalar_or_range) == 2:
+                _range = obj.add()
+                _range.range.minimum = float(scalar_or_range[0])
+                _range.range.maximum = float(scalar_or_range[1])
+            else:
+                MSG = 'List/tuple with {:d} items in item(#{:d}, {:s})'
+                raise NotImplementedError(MSG.format(len(scalar_or_range), i, str(scalar_or_range)))
+        else:
+            MSG = 'Type({:s}) in item(#{:d}, {:s})'
+            raise NotImplementedError(MSG.format(str(type(scalar_or_range)), i, str(scalar_or_range)))
+
+def list_scalar_range__grpc_to_str(obj : List[ScalarOrRange]) -> str:
+    str_items = list()
+    for item in obj:
+        item_kind = item.WhichOneof('value')
+        if item_kind == 'scalar':
+            str_items.append(str(item.scalar))
+        elif item_kind == 'range':
+            str_items.append('{:s}..{:s}'.format(str(item.range.minimum), str(item.range.maximum)))
+        else:
+            raise NotImplementedError('Unsupported ScalarOrRange kind({:s})'.format(str(item_kind)))
+    return ','.join(str_items)
+
+def list_scalar_range__grpc_to_list(obj : List[ScalarOrRange]) -> Type_ListScalarRange:
+    list_scalar_range = list()
+    for item in obj:
+        item_kind = item.WhichOneof('value')
+        if item_kind == 'scalar':
+            scalar_or_range = float(item.scalar)
+        elif item_kind == 'range':
+            scalar_or_range = (float(item.range.minimum), float(item.range.maximum))
+        else:
+            raise NotImplementedError('Unsupported ScalarOrRange kind({:s})'.format(str(item_kind)))
+        list_scalar_range.append(scalar_or_range)
+    return list_scalar_range
+
+def generate_value(
+    list_scalar_range : Type_ListScalarRange, ndigits : Optional[int] = None
+) -> float:
+    scalar_or_range = random.choice(list_scalar_range)
+    if isinstance(scalar_or_range, (float, str)):
+        value = float(scalar_or_range)
+    elif isinstance(scalar_or_range, (list, tuple)):
+        if len(scalar_or_range) == 1:
+            value = float(scalar_or_range[0])
+        elif len(scalar_or_range) == 2:
+            minimum = float(scalar_or_range[0])
+            maximum = float(scalar_or_range[1])
+            value = random.uniform(minimum, maximum)
+        else:
+            MSG = 'List/tuple with {:d} items in item({:s})'
+            raise NotImplementedError(MSG.format(len(scalar_or_range), str(scalar_or_range)))
+    else:
+        MSG = 'Type({:s}) in item({:s})'
+        raise NotImplementedError(MSG.format(str(type(scalar_or_range)), str(scalar_or_range)))
+
+    if ndigits is None: return value
+    return round(value, ndigits=ndigits)
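+
+# Illustrative usage: generate_value([99.9, (95.0, 99.99)], ndigits=2) returns
+# either 99.9 or a uniform draw from [95.0, 99.99], rounded to 2 decimals.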
diff --git a/src/load_generator/tools/__init__.py b/src/load_generator/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..38d04994fb0fa1951fb465bc127eb72659dc2eaf
--- /dev/null
+++ b/src/load_generator/tools/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile
index 7c718890fcf3f07b32f66eca2ecab41f2eb30fbb..2a1510954dbd2a9b0817f94145baaa22ac9d3a3f 100644
--- a/src/webui/Dockerfile
+++ b/src/webui/Dockerfile
@@ -79,6 +79,7 @@ COPY --chown=webui:webui src/device/__init__.py device/__init__.py
 COPY --chown=webui:webui src/device/client/. device/client/
 COPY --chown=webui:webui src/load_generator/__init__.py load_generator/__init__.py
 COPY --chown=webui:webui src/load_generator/client/. load_generator/client/
+COPY --chown=webui:webui src/load_generator/tools/. load_generator/tools/
 COPY --chown=webui:webui src/service/__init__.py service/__init__.py
 COPY --chown=webui:webui src/service/client/. service/client/
 COPY --chown=webui:webui src/slice/__init__.py slice/__init__.py
diff --git a/src/webui/grafana_prom_load_generator.json b/src/webui/grafana_prom_load_generator.json
new file mode 100644
index 0000000000000000000000000000000000000000..efdc8a1180e0d44f726becf5c8125c34779f7c78
--- /dev/null
+++ b/src/webui/grafana_prom_load_generator.json
@@ -0,0 +1,399 @@
+{"overwrite": true, "folderId": 0, "dashboard":
+  {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": {
+            "type": "datasource",
+            "uid": "grafana"
+          },
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "target": {
+            "limit": 100,
+            "matchAny": false,
+            "tags": [],
+            "type": "dashboard"
+          },
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "fiscalYearStartMonth": 0,
+    "graphTooltip": 0,
+    "id": null,
+    "iteration": 1682528742676,
+    "links": [],
+    "liveNow": false,
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": {
+          "type": "prometheus",
+          "uid": "prometheus"
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "8.5.22",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_started_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "started",
+            "refId": "A"
+          },
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_completed_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "completed",
+            "refId": "B"
+          },
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_failed_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "failed",
+            "refId": "C"
+          },
+          {
+            "datasource": {
+              "type": "prometheus",
+              "uid": "prometheus"
+            },
+            "editorMode": "code",
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_counter_requests_blocked_total{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "blocked",
+            "range": true,
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeRegions": [],
+        "title": "Requests",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "mode": "time",
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:935",
+            "format": "short",
+            "logBase": 1,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:936",
+            "format": "short",
+            "logBase": 1,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false
+        }
+      },
+      {
+        "cards": {},
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "linear",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": {
+          "type": "prometheus",
+          "uid": "prometheus"
+        },
+        "gridPos": {
+          "h": 8,
+          "w": 24,
+          "x": 0,
+          "y": 6
+        },
+        "heatmap": {},
+        "hideZeroBuckets": true,
+        "highlightCards": true,
+        "id": 2,
+        "interval": "60s",
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(\r\n    max_over_time(tfs_loadgen_requests_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m]) -\r\n    min_over_time(tfs_loadgen_requests_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m])\r\n) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "1m",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "refId": "A"
+          }
+        ],
+        "title": "Histogram",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "yAxis": {
+          "format": "s",
+          "logBase": 1,
+          "show": true
+        },
+        "yBucketBound": "auto"
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": {
+          "type": "prometheus",
+          "uid": "prometheus"
+        },
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 6,
+          "w": 24,
+          "x": 0,
+          "y": 14
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "rightSide": false,
+          "show": false,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "nullPointMode": "null",
+        "options": {
+          "alertThreshold": true
+        },
+        "percentage": false,
+        "pluginVersion": "8.5.22",
+        "pointradius": 2,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(tfs_loadgen_requests_[[method]]_histogram_duration_sum{pod=~\"[[pod]]\"})",
+            "hide": false,
+            "interval": "",
+            "legendFormat": "total time",
+            "refId": "B"
+          }
+        ],
+        "thresholds": [],
+        "timeRegions": [],
+        "title": "Total Exec Time",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "transformations": [],
+        "type": "graph",
+        "xaxis": {
+          "mode": "time",
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "$$hashKey": "object:407",
+            "format": "s",
+            "logBase": 1,
+            "min": "0",
+            "show": true
+          },
+          {
+            "$$hashKey": "object:408",
+            "format": "short",
+            "logBase": 1,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false
+        }
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 36,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": false,
+            "text": "setup",
+            "value": "setup"
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "metrics(tfs_loadgen_requests_)",
+          "hide": 0,
+          "includeAll": false,
+          "label": "Method",
+          "multi": false,
+          "name": "method",
+          "options": [],
+          "query": {
+            "query": "metrics(tfs_loadgen_requests_)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "/tfs_loadgen_requests_(.+)_histogram_duration_bucket/",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        },
+        {
+          "allValue": ".*",
+          "current": {
+            "selected": true,
+            "text": [
+              "All"
+            ],
+            "value": [
+              "$__all"
+            ]
+          },
+          "datasource": {
+            "type": "prometheus",
+            "uid": "prometheus"
+          },
+          "definition": "label_values(tfs_loadgen_requests_[[method]]_histogram_duration_bucket, pod)",
+          "hide": 0,
+          "includeAll": true,
+          "label": "Pod",
+          "multi": true,
+          "name": "pod",
+          "options": [],
+          "query": {
+            "query": "label_values(tfs_loadgen_requests_[[method]]_histogram_duration_bucket, pod)",
+            "refId": "StandardVariableQuery"
+          },
+          "refresh": 2,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-15m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / Load Generator Status",
+    "uid": "tfs-loadgen-stats",
+    "version": 3,
+    "weekStart": ""
+  }
+}
diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py
index 4e0020b04f33152de382f5b93af9735f8d737f92..8092b3193563df52e9c538e20890835f799fe8ee 100644
--- a/src/webui/service/load_gen/forms.py
+++ b/src/webui/service/load_gen/forms.py
@@ -14,7 +14,12 @@
 
 from flask_wtf import FlaskForm
 from wtforms import BooleanField, FloatField, IntegerField, StringField, SubmitField
-from wtforms.validators import DataRequired, NumberRange
+from wtforms.validators import DataRequired, NumberRange, Regexp
+from load_generator.tools.ListScalarRange import RE_SCALAR_RANGE_LIST
+
+DEFAULT_AVAILABILITY   = '0.0..99.9999'
+DEFAULT_CAPACITY_GBPS  = '0.1..100.00' # alternative: '10, 40, 50, 100, 400'
+DEFAULT_E2E_LATENCY_MS = '5.0..100.00'
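+# fields accept comma-separated scalars and/or 'min..max' ranges, e.g.
+# '99.9, 95..99.99' (validated against RE_SCALAR_RANGE_LIST)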
 
 class LoadGenForm(FlaskForm):
     num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)])
@@ -31,6 +36,12 @@ class LoadGenForm(FlaskForm):
     holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=0.0)])
     inter_arrival_time = FloatField('Inter Arrival Time [seconds]', default=0, validators=[NumberRange(min=0.0)])
 
+    availability   = StringField('Availability [%]', default=DEFAULT_AVAILABILITY,   validators=[Regexp(RE_SCALAR_RANGE_LIST)])
+    capacity_gbps  = StringField('Capacity [Gbps]',  default=DEFAULT_CAPACITY_GBPS,  validators=[Regexp(RE_SCALAR_RANGE_LIST)])
+    e2e_latency_ms = StringField('E2E Latency [ms]', default=DEFAULT_E2E_LATENCY_MS, validators=[Regexp(RE_SCALAR_RANGE_LIST)])
+
+    max_workers = IntegerField('Max Workers', default=10, validators=[DataRequired(), NumberRange(min=1)])
+
     do_teardown = BooleanField('Do Teardown', default=True)
 
     record_to_dlt = BooleanField('Record to DLT', default=False)
diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py
index 5f47f06b0ff59ad1383aab94caa41adc08440c87..1b4f9c6ba971c17fc3ac9216aad6cc39b20c8151 100644
--- a/src/webui/service/load_gen/routes.py
+++ b/src/webui/service/load_gen/routes.py
@@ -17,6 +17,8 @@ from flask import redirect, render_template, Blueprint, flash, url_for
 from common.proto.context_pb2 import Empty
 from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum
 from load_generator.client.LoadGeneratorClient import LoadGeneratorClient
+from load_generator.tools.ListScalarRange import (
+    list_scalar_range__grpc_to_str, list_scalar_range__list_to_grpc, parse_list_scalar_range)
 from .forms import LoadGenForm
 
 load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen')
@@ -55,11 +57,19 @@ def home():
     _holding_time       = round(status.parameters.holding_time       , ndigits=4)
     _inter_arrival_time = round(status.parameters.inter_arrival_time , ndigits=4)
 
+    _availability       = list_scalar_range__grpc_to_str(status.parameters.availability  )
+    _capacity_gbps      = list_scalar_range__grpc_to_str(status.parameters.capacity_gbps )
+    _e2e_latency_ms     = list_scalar_range__grpc_to_str(status.parameters.e2e_latency_ms)
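+    # (The conversions above render the repeated scalar/range values back to their
+    # string form, e.g. '0.1..100.00', so the form displays the active settings.)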
+
     form = LoadGenForm()
     set_properties(form.num_requests             , status.parameters.num_requests , readonly=status.running)
     set_properties(form.offered_load             , _offered_load                  , readonly=status.running)
     set_properties(form.holding_time             , _holding_time                  , readonly=status.running)
     set_properties(form.inter_arrival_time       , _inter_arrival_time            , readonly=status.running)
+    set_properties(form.availability             , _availability                  , readonly=status.running)
+    set_properties(form.capacity_gbps            , _capacity_gbps                 , readonly=status.running)
+    set_properties(form.e2e_latency_ms           , _e2e_latency_ms                , readonly=status.running)
+    set_properties(form.max_workers              , status.parameters.max_workers  , readonly=status.running)
     set_properties(form.do_teardown              , status.parameters.do_teardown  , disabled=status.running)
     set_properties(form.record_to_dlt            , status.parameters.record_to_dlt, disabled=status.running)
     set_properties(form.dlt_domain_id            , status.parameters.dlt_domain_id, readonly=status.running)
@@ -82,16 +92,25 @@ def start():
     form = LoadGenForm()
     if form.validate_on_submit():
         try:
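+            # Parse the validated form strings into their scalar/range representation
+            # before copying them into the gRPC Parameters message built below.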
+            _availability   = parse_list_scalar_range(form.availability.data  )
+            _capacity_gbps  = parse_list_scalar_range(form.capacity_gbps.data )
+            _e2e_latency_ms = parse_list_scalar_range(form.e2e_latency_ms.data)
+
             load_gen_params = Parameters()
             load_gen_params.num_requests       = form.num_requests.data
             load_gen_params.offered_load       = form.offered_load.data
             load_gen_params.holding_time       = form.holding_time.data
             load_gen_params.inter_arrival_time = form.inter_arrival_time.data
+            load_gen_params.max_workers        = form.max_workers.data
             load_gen_params.do_teardown        = form.do_teardown.data
             load_gen_params.dry_mode           = False
             load_gen_params.record_to_dlt      = form.record_to_dlt.data
             load_gen_params.dlt_domain_id      = form.dlt_domain_id.data
 
+            list_scalar_range__list_to_grpc(_availability,   load_gen_params.availability  ) # pylint: disable=no-member
+            list_scalar_range__list_to_grpc(_capacity_gbps,  load_gen_params.capacity_gbps ) # pylint: disable=no-member
+            list_scalar_range__list_to_grpc(_e2e_latency_ms, load_gen_params.e2e_latency_ms) # pylint: disable=no-member
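+            # (The helper fills the repeated fields in place, since protobuf repeated
+            # message fields cannot be assigned directly; the pylint suppressions cover
+            # members the generated Parameters class only exposes at runtime.)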
+
             del load_gen_params.request_types[:] # pylint: disable=no-member
             request_types = list()
             if form.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM)
diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html
index d58f42601925ca438ab9d9f20b32f94960b5cada..046459acf12068b2cb9d8529d4454c36609613d5 100644
--- a/src/webui/service/templates/load_gen/home.html
+++ b/src/webui/service/templates/load_gen/home.html
@@ -113,6 +113,66 @@
             </div>
             <br />
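+            <!-- Each of the four field groups added below renders is-invalid styling
+                 plus inline feedback when validation fails; otherwise it renders the
+                 plain form-control input. -->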
 
+            <div class="row mb-3">
+                {{ form.availability.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.availability.errors %}
+                        {{ form.availability(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.availability.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.availability(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.capacity_gbps.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.capacity_gbps.errors %}
+                        {{ form.capacity_gbps(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.capacity_gbps.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.capacity_gbps(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.e2e_latency_ms.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.e2e_latency_ms.errors %}
+                        {{ form.e2e_latency_ms(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.e2e_latency_ms.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.e2e_latency_ms(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
+            <div class="row mb-3">
+                {{ form.max_workers.label(class="col-sm-2 col-form-label") }}
+                <div class="col-sm-10">
+                    {% if form.max_workers.errors %}
+                        {{ form.max_workers(class="form-control is-invalid") }}
+                        <div class="invalid-feedback">
+                            {% for error in form.max_workers.errors %}<span>{{ error }}</span>{% endfor %}
+                        </div>
+                    {% else %}
+                        {{ form.max_workers(class="form-control") }}
+                    {% endif %}
+                </div>
+            </div>
+            <br />
+
             <div class="row mb-3">
                 <div class="col-sm-10">
                     {{ form.do_teardown }} {{ form.do_teardown.label(class="col-sm-3 col-form-label") }}<br/>