diff --git a/src/tests/tools/load_gen/Constants.py b/src/tests/tools/load_gen/Constants.py
index 28c1c65be3c03a073abeefc84bdb3731b1eaf581..94d80bfdf015d770363fa5dce3929f19ec743d5a 100644
--- a/src/tests/tools/load_gen/Constants.py
+++ b/src/tests/tools/load_gen/Constants.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-SERVICE_TYPE_L2NM = 'l2nm'
-SERVICE_TYPE_L3NM = 'l3nm'
-SERVICE_TYPE_TAPI = 'tapi'
+REQUEST_TYPE_SERVICE_L2NM = 'svc-l2nm'
+REQUEST_TYPE_SERVICE_L3NM = 'svc-l3nm'
+REQUEST_TYPE_SERVICE_TAPI = 'svc-tapi'
+REQUEST_TYPE_SLICE_L2NM   = 'slc-l2nm'
+REQUEST_TYPE_SLICE_L3NM   = 'slc-l3nm'
diff --git a/src/tests/tools/load_gen/Parameters.py b/src/tests/tools/load_gen/Parameters.py
index fa2ac01f3b7a5ffd79f515962a54945402405b60..9aab3b1470276eb7ae3d4ed132b44aebb2b1eb55 100644
--- a/src/tests/tools/load_gen/Parameters.py
+++ b/src/tests/tools/load_gen/Parameters.py
@@ -16,12 +16,12 @@ from typing import List, Optional
 
 class Parameters:
     def __init__(
-        self, num_services : int, service_types : List[str], offered_load : Optional[float] = None,
+        self, num_requests : int, request_types : List[str], offered_load : Optional[float] = None,
         inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None,
         dry_mode : bool = False
     ) -> None:
-        self._num_services = num_services
-        self._service_types = service_types
+        self._num_requests = num_requests
+        self._request_types = request_types
         self._offered_load = offered_load
         self._inter_arrival_time = inter_arrival_time
         self._holding_time = holding_time
@@ -35,10 +35,10 @@ class Parameters:
             self._holding_time = self._offered_load * self._inter_arrival_time
 
     @property
-    def num_services(self): return self._num_services
+    def num_requests(self): return self._num_requests
 
     @property
-    def service_types(self): return self._service_types
+    def request_types(self): return self._request_types
 
     @property
     def offered_load(self): return self._offered_load
diff --git a/src/tests/tools/load_gen/ServiceGenerator.py b/src/tests/tools/load_gen/RequestGenerator.py
similarity index 62%
rename from src/tests/tools/load_gen/ServiceGenerator.py
rename to src/tests/tools/load_gen/RequestGenerator.py
index 86a70d9c5c7bd84c8ff4b98926f94329798dd6d5..6d0be9d9c5f75243cb9b92260e8889fbe9cdecc2 100644
--- a/src/tests/tools/load_gen/ServiceGenerator.py
+++ b/src/tests/tools/load_gen/RequestGenerator.py
@@ -21,8 +21,11 @@ from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Service import (
     json_service_l2nm_planned, json_service_l3nm_planned, json_service_tapi_planned)
+from common.tools.object_factory.Slice import json_slice
 from context.client.ContextClient import ContextClient
-from .Constants import SERVICE_TYPE_L2NM, SERVICE_TYPE_L3NM, SERVICE_TYPE_TAPI
+from .Constants import (
+    REQUEST_TYPE_SERVICE_L2NM, REQUEST_TYPE_SERVICE_L3NM, REQUEST_TYPE_SERVICE_TAPI,
+    REQUEST_TYPE_SLICE_L2NM, REQUEST_TYPE_SLICE_L3NM)
 from .Parameters import Parameters
 
 LOGGER = logging.getLogger(__name__)
@@ -32,11 +35,11 @@ ENDPOINT_COMPATIBILITY = {
     'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT'  : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT',
 }
 
-class ServiceGenerator:
+class RequestGenerator:
     def __init__(self, parameters : Parameters) -> None:
         self._parameters = parameters
         self._lock = threading.Lock()
-        self._num_services = 0
+        self._num_requests = 0
         self._available_device_endpoints : Dict[str, Set[str]] = dict()
         self._used_device_endpoints : Dict[str, Dict[str, str]] = dict()
         self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict()
@@ -79,7 +82,7 @@ class ServiceGenerator:
                     endpoints_for_type.discard(endpoint_key)
 
     @property
-    def num_services_generated(self): return self._num_services
+    def num_requests_generated(self): return self._num_requests
 
     def dump_state(self) -> None:
         with self._lock:
@@ -139,19 +142,26 @@ class ServiceGenerator:
             self._used_device_endpoints.setdefault(device_uuid, set()).pop(endpoint_uuid, None)
             self._available_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid)
 
-    def compose_service(self) -> Optional[Dict]:
+    def compose_request(self) -> Optional[Dict]:
         with self._lock:
-            self._num_services += 1
-            num_service = self._num_services
-        #service_uuid = str(uuid.uuid4())
-        service_uuid = 'svc_{:d}'.format(num_service)
+            self._num_requests += 1
+            num_request = self._num_requests
+
+        #request_uuid = str(uuid.uuid4())
+        request_uuid = 'svc_{:d}'.format(num_request)
         
-        # choose service type
-        service_type = random.choice(self._parameters.service_types)
+        # choose request type
+        request_type = random.choice(self._parameters.request_types)
+
+        if request_type in {REQUEST_TYPE_SERVICE_L2NM, REQUEST_TYPE_SERVICE_L3NM, REQUEST_TYPE_SERVICE_TAPI}:
+            return self._compose_service(num_request, request_uuid, request_type)
+        elif request_type in {REQUEST_TYPE_SLICE_L2NM, REQUEST_TYPE_SLICE_L3NM}:
+            return self._compose_slice(num_request, request_uuid, request_type)
 
+    def _compose_service(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
         # choose source endpoint
-        src_endpoint_types = set(ENDPOINT_COMPATIBILITY.keys()) if service_type in {SERVICE_TYPE_TAPI} else None
-        src = self._use_device_endpoint(service_uuid, endpoint_types=src_endpoint_types)
+        src_endpoint_types = set(ENDPOINT_COMPATIBILITY.keys()) if request_type in {REQUEST_TYPE_SERVICE_TAPI} else None
+        src = self._use_device_endpoint(request_uuid, endpoint_types=src_endpoint_types)
         if src is None:
             LOGGER.warning('>> No source endpoint is available')
             return None
@@ -160,14 +170,14 @@ class ServiceGenerator:
         # identify compatible destination endpoint types
         src_endpoint_type = self._endpoint_ids_to_types.get((src_device_uuid,src_endpoint_uuid))
         dst_endpoint_type = ENDPOINT_COMPATIBILITY.get(src_endpoint_type)
-        dst_endpoint_types = {dst_endpoint_type} if service_type in {SERVICE_TYPE_TAPI} else None
+        dst_endpoint_types = {dst_endpoint_type} if request_type in {REQUEST_TYPE_SERVICE_TAPI} else None
 
-        # identify expluded destination devices
-        exclude_device_uuids = {} if service_type in {SERVICE_TYPE_TAPI} else {src_device_uuid}
+        # identify excluded destination devices
+        exclude_device_uuids = set() if request_type in {REQUEST_TYPE_SERVICE_TAPI} else {src_device_uuid}
 
         # choose feasible destination endpoint
         dst = self._use_device_endpoint(
-            service_uuid, endpoint_types=dst_endpoint_types, exclude_device_uuids=exclude_device_uuids)
+            request_uuid, endpoint_types=dst_endpoint_types, exclude_device_uuids=exclude_device_uuids)
         
         # if destination endpoint not found, release source, and terminate current service generation
         if dst is None:
@@ -182,12 +192,12 @@ class ServiceGenerator:
             json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
         ]
 
-        if service_type == SERVICE_TYPE_L2NM:
+        if request_type == REQUEST_TYPE_SERVICE_L2NM:
             constraints = [
                 json_constraint_custom('bandwidth[gbps]', '10.0'),
                 json_constraint_custom('latency[ms]',     '20.0'),
             ]
-            vlan_id = num_service % 1000
+            vlan_id = num_request % 1000
             circuit_id = '{:03d}'.format(vlan_id)
             src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
             dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
@@ -211,15 +221,15 @@ class ServiceGenerator:
                 }),
             ]
             return json_service_l2nm_planned(
-                service_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
+                request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
-        elif service_type == SERVICE_TYPE_L3NM:
+        elif request_type == REQUEST_TYPE_SERVICE_L3NM:
             constraints = [
                 json_constraint_custom('bandwidth[gbps]', '10.0'),
                 json_constraint_custom('latency[ms]',     '20.0'),
             ]
-            vlan_id = num_service % 1000
-            bgp_as = 60000 + (num_service % 10000)
+            vlan_id = num_request % 1000
+            bgp_as = 60000 + (num_request % 10000)
             bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
             route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
             src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
@@ -250,9 +260,9 @@ class ServiceGenerator:
                 }),
             ]
             return json_service_l3nm_planned(
-                service_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
+                request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
-        elif service_type == SERVICE_TYPE_TAPI:
+        elif request_type == REQUEST_TYPE_SERVICE_TAPI:
             config_rules = [
                 json_config_rule_set('/settings', {
                     'capacity_value'  : 50.0,
@@ -263,10 +273,108 @@ class ServiceGenerator:
                 }),
             ]
             return json_service_tapi_planned(
-                service_uuid, endpoint_ids=endpoint_ids, constraints=[], config_rules=config_rules)
+                request_uuid, endpoint_ids=endpoint_ids, constraints=[], config_rules=config_rules)
+
+    def _compose_slice(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
+        # choose source endpoint
+        src = self._use_device_endpoint(request_uuid)
+        if src is None:
+            LOGGER.warning('>> No source endpoint is available')
+            return None
+        src_device_uuid,src_endpoint_uuid = src
+
+        # identify excluded destination devices: slices are never TAPI, so always exclude the source device
+        exclude_device_uuids = {src_device_uuid}
+
+        # choose feasible destination endpoint
+        dst = self._use_device_endpoint(request_uuid, exclude_device_uuids=exclude_device_uuids)
+        
+        # if destination endpoint not found, release source, and terminate current slice generation
+        if dst is None:
+            LOGGER.warning('>> No destination endpoint is available')
+            self._release_device_endpoint(src_device_uuid, src_endpoint_uuid)
+            return None
+
+        # compose endpoints
+        dst_device_uuid,dst_endpoint_uuid = dst
+        endpoint_ids = [
+            json_endpoint_id(json_device_id(src_device_uuid), src_endpoint_uuid),
+            json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
+        ]
+        constraints = [
+            json_constraint_custom('bandwidth[gbps]', '10.0'),
+            json_constraint_custom('latency[ms]',     '20.0'),
+        ]
+
+        if request_type == REQUEST_TYPE_SLICE_L2NM:
+            vlan_id = num_request % 1000
+            circuit_id = '{:03d}'.format(vlan_id)
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_uuid.replace('R', '')))
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'mtu': 1512
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                    'router_id': src_router_id,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id': vlan_id,
+                    'remote_router': dst_router_id,
+                    'circuit_id': circuit_id,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                    'router_id': dst_router_id,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id': vlan_id,
+                    'remote_router': src_router_id,
+                    'circuit_id': circuit_id,
+                }),
+            ]
+
+        elif request_type == REQUEST_TYPE_SLICE_L3NM:
+            vlan_id = num_request % 1000
+            bgp_as = 60000 + (num_request % 10000)
+            bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
+            route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_uuid.replace('R', '')))
+            src_address_ip = '.'.join([src_device_uuid.replace('R', ''), '0'] + src_endpoint_uuid.split('/'))
+            dst_address_ip = '.'.join([dst_device_uuid.replace('R', ''), '0'] + dst_endpoint_uuid.split('/'))
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'mtu'             : 1512,
+                    'bgp_as'          : bgp_as,
+                    'bgp_route_target': bgp_route_target,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                    'router_id'          : src_router_id,
+                    'route_distinguisher': route_distinguisher,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id'            : vlan_id,
+                    'address_ip'         : src_address_ip,
+                    'address_prefix'     : 16,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                    'router_id'          : dst_router_id,
+                    'route_distinguisher': route_distinguisher,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id'            : vlan_id,
+                    'address_ip'         : dst_address_ip,
+                    'address_prefix'     : 16,
+                }),
+            ]
+
+        return json_slice(
+            request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
 
-    def release_service(self, json_service : Dict) -> None:
-        for endpoint_id in json_service['service_endpoint_ids']:
-            device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
-            endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
-            self._release_device_endpoint(device_uuid, endpoint_uuid)
+    def release_request(self, json_request : Dict) -> None:
+        if 'service_id' in json_request:
+            for endpoint_id in json_request['service_endpoint_ids']:
+                device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+                endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+                self._release_device_endpoint(device_uuid, endpoint_uuid)
+        elif 'slice_id' in json_request:
+            for endpoint_id in json_request['slice_endpoint_ids']:
+                device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+                endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+                self._release_device_endpoint(device_uuid, endpoint_uuid)
diff --git a/src/tests/tools/load_gen/RequestScheduler.py b/src/tests/tools/load_gen/RequestScheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..5be579c71f26bf133098aa14e92fd7e3e9387e57
--- /dev/null
+++ b/src/tests/tools/load_gen/RequestScheduler.py
@@ -0,0 +1,148 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytz, random
+from datetime import datetime, timedelta
+from apscheduler.executors.pool import ThreadPoolExecutor
+from apscheduler.jobstores.memory import MemoryJobStore
+from apscheduler.schedulers.blocking import BlockingScheduler
+from typing import Dict
+from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId
+from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
+from .Parameters import Parameters
+from .RequestGenerator import RequestGenerator
+
+logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
+logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
+
+LOGGER = logging.getLogger(__name__)
+
+class RequestScheduler:
+    def __init__(self, parameters : Parameters, generator : RequestGenerator) -> None:
+        self._scheduler = BlockingScheduler()
+        self._scheduler.configure(
+            jobstores = {'default': MemoryJobStore()},
+            executors = {'default': ThreadPoolExecutor(max_workers=10)},
+            job_defaults = {
+                'coalesce': False,
+                'max_instances': 100,
+                'misfire_grace_time': 120,
+            },
+            timezone=pytz.utc)
+        self._parameters = parameters
+        self._generator = generator
+
+    def _schedule_request_setup(self) -> None:
+        if self._generator.num_requests_generated >= self._parameters.num_requests:
+            LOGGER.info('Generation Done!')
+            #self._scheduler.shutdown()
+            return
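+        # sample the next inter-arrival time from an exponential distribution (mean = inter_arrival_time)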
+        iat = random.expovariate(1.0 / self._parameters.inter_arrival_time)
+        run_date = datetime.utcnow() + timedelta(seconds=iat)
+        self._scheduler.add_job(
+            self._request_setup, trigger='date', run_date=run_date, timezone=pytz.utc)
+
+    def _schedule_request_teardown(self, request : Dict) -> None:
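+        # sample the holding time from an exponential distribution (mean = holding_time)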
+        ht  = random.expovariate(1.0 / self._parameters.holding_time)
+        run_date = datetime.utcnow() + timedelta(seconds=ht)
+        self._scheduler.add_job(
+            self._request_teardown, args=(request,), trigger='date', run_date=run_date, timezone=pytz.utc)
+
+    def start(self):
+        self._schedule_request_setup()
+        self._scheduler.start()
+
+    def _request_setup(self) -> None:
+        self._schedule_request_setup()
+
+        request = self._generator.compose_request()
+        if request is None:
+            LOGGER.warning('No resources available to compose new request')
+            return
+
+        if 'service_id' in request:
+            service_uuid = request['service_id']['service_uuid']['uuid']
+            src_device_uuid = request['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
+            src_endpoint_uuid = request['service_endpoint_ids'][0]['endpoint_uuid']['uuid']
+            dst_device_uuid = request['service_endpoint_ids'][1]['device_id']['device_uuid']['uuid']
+            dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
+            LOGGER.info('Setup Service: uuid=%s src=%s:%s dst=%s:%s',
+                service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
+
+            if not self._parameters.dry_mode:
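+                # create the service with an empty body first, then update it with endpoints, constraints and config rules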
+                request_add = copy.deepcopy(request)
+                request_add['service_endpoint_ids'] = []
+                request_add['service_constraints'] = []
+                request_add['service_config'] = {'config_rules': []}
+                service_client = ServiceClient()    # create instances per request to load balance between pods
+                service_client.CreateService(Service(**request_add))
+                service_client.UpdateService(Service(**request))
+                service_client.close()
+
+        elif 'slice_id' in request:
+            slice_uuid = request['slice_id']['slice_uuid']['uuid']
+            src_device_uuid = request['slice_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
+            src_endpoint_uuid = request['slice_endpoint_ids'][0]['endpoint_uuid']['uuid']
+            dst_device_uuid = request['slice_endpoint_ids'][1]['device_id']['device_uuid']['uuid']
+            dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid']
+            LOGGER.info('Setup Slice: uuid=%s src=%s:%s dst=%s:%s',
+                slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
+
+            if not self._parameters.dry_mode:
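+                # create the slice with an empty body first, then update it with endpoints, constraints and config rules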
+                request_add = copy.deepcopy(request)
+                request_add['slice_endpoint_ids'] = []
+                request_add['slice_constraints'] = []
+                request_add['slice_config'] = {'config_rules': []}
+                slice_client = SliceClient()    # create instances per request to load balance between pods
+                slice_client.CreateSlice(Slice(**request_add))
+                slice_client.UpdateSlice(Slice(**request))
+                slice_client.close()
+
+        self._schedule_request_teardown(request)
+
+    def _request_teardown(self, request : Dict) -> None:
+        if 'service_id' in request:
+            service_uuid = request['service_id']['service_uuid']['uuid']
+            src_device_uuid = request['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
+            src_endpoint_uuid = request['service_endpoint_ids'][0]['endpoint_uuid']['uuid']
+            dst_device_uuid = request['service_endpoint_ids'][1]['device_id']['device_uuid']['uuid']
+            dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
+            LOGGER.info('Teardown Service: uuid=%s src=%s:%s dst=%s:%s',
+                service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
+
+            if not self._parameters.dry_mode:
+                service_client = ServiceClient()    # create instances per request to load balance between pods
+                service_client.DeleteService(ServiceId(**(request['service_id'])))
+                service_client.close()
+
+        elif 'slice_id' in request:
+            slice_uuid = request['slice_id']['slice_uuid']['uuid']
+            src_device_uuid = request['slice_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
+            src_endpoint_uuid = request['slice_endpoint_ids'][0]['endpoint_uuid']['uuid']
+            dst_device_uuid = request['slice_endpoint_ids'][1]['device_id']['device_uuid']['uuid']
+            dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid']
+            LOGGER.info('Teardown Slice: uuid=%s src=%s:%s dst=%s:%s',
+                slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
+
+            if not self._parameters.dry_mode:
+                slice_client = SliceClient()    # create instances per request to load balance between pods
+                slice_client.DeleteSlice(SliceId(**(request['slice_id'])))
+                slice_client.close()
+
+        self._generator.release_request(request)
diff --git a/src/tests/tools/load_gen/ServiceScheduler.py b/src/tests/tools/load_gen/ServiceScheduler.py
deleted file mode 100644
index 5a8b8dbdf02a7020186453b49f313c41ab850ef3..0000000000000000000000000000000000000000
--- a/src/tests/tools/load_gen/ServiceScheduler.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy, logging, pytz, random
-from datetime import datetime, timedelta
-from apscheduler.executors.pool import ThreadPoolExecutor
-from apscheduler.jobstores.memory import MemoryJobStore
-from apscheduler.schedulers.blocking import BlockingScheduler
-from typing import Dict
-from common.proto.context_pb2 import Service, ServiceId
-from service.client.ServiceClient import ServiceClient
-from .Parameters import Parameters
-from .ServiceGenerator import ServiceGenerator
-
-logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
-logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
-
-LOGGER = logging.getLogger(__name__)
-
-class ServiceScheduler:
-    def __init__(self, parameters : Parameters, service_generator : ServiceGenerator) -> None:
-        self._scheduler = BlockingScheduler()
-        self._scheduler.configure(
-            jobstores = {'default': MemoryJobStore()},
-            executors = {'default': ThreadPoolExecutor(max_workers=10)},
-            job_defaults = {
-                'coalesce': False,
-                'max_instances': 100,
-                'misfire_grace_time': 120,
-            },
-            timezone=pytz.utc)
-        self._parameters = parameters
-        self._service_generator = service_generator
-
-    def _schedule_service_setup(self) -> None:
-        if self._service_generator.num_services_generated >= self._parameters.num_services:
-            LOGGER.info('Generation Done!')
-            #self._scheduler.shutdown()
-            return
-        iat = random.expovariate(1.0 / self._parameters.inter_arrival_time)
-        run_date = datetime.utcnow() + timedelta(seconds=iat)
-        self._scheduler.add_job(
-            self._service_setup, trigger='date', run_date=run_date, timezone=pytz.utc)
-
-    def _schedule_service_teardown(self, service : Dict) -> None:
-        ht  = random.expovariate(1.0 / self._parameters.holding_time)
-        run_date = datetime.utcnow() + timedelta(seconds=ht)
-        self._scheduler.add_job(
-            self._service_teardown, args=(service,), trigger='date', run_date=run_date, timezone=pytz.utc)
-
-    def start(self):
-        self._schedule_service_setup()
-        self._scheduler.start()
-
-    def _service_setup(self) -> None:
-        self._schedule_service_setup()
-
-        service = self._service_generator.compose_service()
-        if service is None:
-            LOGGER.warning('No resources available to compose new service')
-            return
-
-        service_uuid = service['service_id']['service_uuid']['uuid']
-        src_device_uuid = service['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
-        src_endpoint_uuid = service['service_endpoint_ids'][0]['endpoint_uuid']['uuid']
-        dst_device_uuid = service['service_endpoint_ids'][1]['device_id']['device_uuid']['uuid']
-        dst_endpoint_uuid = service['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
-        LOGGER.info('Setup Service: uuid=%s src=%s:%s dst=%s:%s',
-            service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-
-        if not self._parameters.dry_mode:
-            service_add = copy.deepcopy(service)
-            service_add['service_endpoint_ids'] = []
-            service_add['service_constraints'] = []
-            service_add['service_config'] = {'config_rules': []}
-            service_client = ServiceClient()    # create instances per request to load balance between pods
-            service_client.CreateService(Service(**service_add))
-            service_client.UpdateService(Service(**service))
-
-        self._schedule_service_teardown(service)
-
-    def _service_teardown(self, service : Dict) -> None:
-        service_uuid = service['service_id']['service_uuid']['uuid']
-        src_device_uuid = service['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid']
-        src_endpoint_uuid = service['service_endpoint_ids'][0]['endpoint_uuid']['uuid']
-        dst_device_uuid = service['service_endpoint_ids'][1]['device_id']['device_uuid']['uuid']
-        dst_endpoint_uuid = service['service_endpoint_ids'][1]['endpoint_uuid']['uuid']
-        LOGGER.info('Teardown Service: uuid=%s src=%s:%s dst=%s:%s',
-            service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
-
-        if not self._parameters.dry_mode:
-            service_client = ServiceClient()    # create instances per request to load balance between pods
-            service_client.DeleteService(ServiceId(**(service['service_id'])))
-
-        self._service_generator.release_service(service)
diff --git a/src/tests/tools/load_gen/__main__.py b/src/tests/tools/load_gen/__main__.py
index c1a995ebbcb9b2e0e46bbbe7842a8ea08a70c4fa..f5d9d364d9707b4d038cd614aca0a4b1e3458619 100644
--- a/src/tests/tools/load_gen/__main__.py
+++ b/src/tests/tools/load_gen/__main__.py
@@ -13,10 +13,12 @@
 # limitations under the License.
 
 import logging, sys
-from .Constants import SERVICE_TYPE_L2NM, SERVICE_TYPE_L3NM, SERVICE_TYPE_TAPI
+from .Constants import (
+    REQUEST_TYPE_SERVICE_L2NM, REQUEST_TYPE_SERVICE_L3NM, REQUEST_TYPE_SERVICE_TAPI,
+    REQUEST_TYPE_SLICE_L2NM, REQUEST_TYPE_SLICE_L3NM)
 from .Parameters import Parameters
-from .ServiceGenerator import ServiceGenerator
-from .ServiceScheduler import ServiceScheduler
+from .RequestGenerator import RequestGenerator
+from .RequestScheduler import RequestScheduler
 
 logging.basicConfig(level=logging.INFO)
 LOGGER = logging.getLogger(__name__)
@@ -24,11 +26,13 @@ LOGGER = logging.getLogger(__name__)
 def main():
     LOGGER.info('Starting...')
     parameters = Parameters(
-        num_services  = 100,
-        service_types = [
-            SERVICE_TYPE_L2NM,
-            SERVICE_TYPE_L3NM,
-            #SERVICE_TYPE_TAPI,
+        num_requests = 100,
+        request_types = [
+            REQUEST_TYPE_SERVICE_L2NM,
+            REQUEST_TYPE_SERVICE_L3NM,
+            REQUEST_TYPE_SERVICE_TAPI,
+            REQUEST_TYPE_SLICE_L2NM,
+            REQUEST_TYPE_SLICE_L3NM,
         ],
         offered_load  = 50,
         holding_time  = 10,
@@ -36,11 +40,11 @@ def main():
     )
 
     LOGGER.info('Initializing Generator...')
-    service_generator = ServiceGenerator(parameters)
-    service_generator.initialize()
+    generator = RequestGenerator(parameters)
+    generator.initialize()
 
     LOGGER.info('Running Schedule...')
-    scheduler = ServiceScheduler(parameters, service_generator)
+    scheduler = RequestScheduler(parameters, generator)
     scheduler.start()
 
     LOGGER.info('Done!')