diff --git a/scripts/run_tests_locally-forecaster.sh b/scripts/run_tests_locally-forecaster.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4a3300b23d6548006931acffd08ec8be2e3b9827
--- /dev/null
+++ b/scripts/run_tests_locally-forecaster.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=$(pwd)
+
+cd "$PROJECTDIR/src"
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unit tests and measure code coverage at the same time
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    forecaster/tests/test_unitary.py
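The script above is meant to be invoked from the repository root. For completeness, a rough Python equivalent is sketched below, handy when driving the same test from an IDE; the relative rcfile path and the use of coverage's Python API are assumptions, not part of the patch.

```python
# Hedged sketch: approximate Python equivalent of run_tests_locally-forecaster.sh,
# assuming it is executed from the src/ folder (mirroring the script's `cd`).
import coverage, pytest

cov = coverage.Coverage(config_file='../coverage/.coveragerc')  # same rcfile the script passes
cov.start()
pytest.main(['--log-level=INFO', '--verbose', 'forecaster/tests/test_unitary.py'])
cov.stop()
cov.save()  # note: unlike `coverage run --append`, this does not merge prior data unless configured to
```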
diff --git a/src/forecaster/service/KpiManager.py b/src/forecaster/service/KpiManager.py
index bbf900d07eaf4bdc2105f3f53311855c5154d2ec..353ac893fd4ca233e7cfb713b20646bc54a12b8e 100644
--- a/src/forecaster/service/KpiManager.py
+++ b/src/forecaster/service/KpiManager.py
@@ -25,10 +25,10 @@ class KpiManager:
 
     def get_kpi_ids_from_link_ids(
         self, link_ids : List[LinkId]
-    ) -> Dict[Tuple[LinkId, KpiSampleType], KpiId]:
+    ) -> Dict[Tuple[LinkId, int], KpiId]:
         link_uuids = {link_id.link_uuid.uuid for link_id in link_ids}
         kpi_descriptors = self._monitoring_client.GetKpiDescriptorList(Empty())
-        kpi_ids : Dict[Tuple[LinkId, KpiSampleType], KpiId] = {
+        kpi_ids : Dict[Tuple[LinkId, int], KpiId] = {
             (kpi_descriptor.link_id, kpi_descriptor.kpi_sample_type) : kpi_descriptor.kpi_id
             for kpi_descriptor in kpi_descriptors
             if kpi_descriptor.link_id.link_uuid.uuid in link_uuids
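The type-hint change above likely reflects how protobuf enums surface in Python: an enum-typed field such as `kpi_sample_type` is read back as a plain `int`, not a `KpiSampleType` instance, so `int` is the accurate key type. A minimal sketch (not part of the patch; the `KpiDescriptor` construction is purely illustrative):

```python
# Enum-valued protobuf fields are plain ints in the Python API, which is why the
# dictionary key type changed from KpiSampleType to int.
from common.proto.kpi_sample_types_pb2 import KpiSampleType
from common.proto.monitoring_pb2 import KpiDescriptor

kpi_descriptor = KpiDescriptor()
kpi_descriptor.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED
assert isinstance(kpi_descriptor.kpi_sample_type, int)  # no enum wrapper instance at runtime
```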
diff --git a/src/forecaster/tests/Tools.py b/src/forecaster/tests/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e71447d771135c99d471b3244dffc9b11476a81
--- /dev/null
+++ b/src/forecaster/tests/Tools.py
@@ -0,0 +1,112 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import calendar, math, pandas
+from datetime import datetime, timezone
+from typing import Dict
+from common.tools.object_factory.Context import json_context
+from common.tools.object_factory.Device import (
+    json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id
+)
+from common.tools.object_factory.EndPoint import json_endpoint_descriptor, json_endpoint_id
+from common.tools.object_factory.Link import json_link
+from common.tools.object_factory.Topology import json_topology
+
+def time_datetime_to_int(dt_time : datetime) -> int:
+    return int(calendar.timegm(dt_time.timetuple()))
+
+def time_datetime_to_float(dt_time : datetime) -> float:
+    return time_datetime_to_int(dt_time) + (dt_time.microsecond / 1.e6)
+
+def time_utc_now_to_datetime() -> datetime:
+    return datetime.now(tz=timezone.utc)
+
+def time_utc_now_to_float() -> float:
+    return time_datetime_to_float(time_utc_now_to_datetime())
+
+def read_csv(csv_file : str) -> pandas.DataFrame:
+    df = pandas.read_csv(csv_file)
+
+    if 'dataset.csv' in csv_file:
+        df.rename(columns={'linkid': 'link_id', 'ds': 'timestamp', 'y': 'used_capacity_gbps'}, inplace=True)
+        df[['source', 'destination']] = df['link_id'].str.split('_', expand=True)
+    elif 'dataset2.csv' in csv_file:
+        df.drop(columns=['Unnamed: 0'], inplace=True)
+        df.rename(columns={
+            'target': 'destination', 'id': 'link_id', 'ds': 'timestamp', 'demandValue': 'used_capacity_gbps'
+        }, inplace=True)
+
+    df['timestamp'] = pandas.to_datetime(df['timestamp'])
+    max_timestamp = time_datetime_to_int(df['timestamp'].max())
+    now_timestamp = time_datetime_to_int(datetime.now(tz=timezone.utc))
+    df['timestamp'] = df['timestamp'] + pandas.offsets.Second(now_timestamp - max_timestamp) # shift series so the newest sample is "now"
+    df.sort_values('timestamp', ascending=True, inplace=True)
+    return df
+
+def compose_descriptors(df : pandas.DataFrame) -> Dict:
+    devices = dict()
+    links = dict()
+
+    # Deduplicate (link_id, source, destination) triples to enumerate the topology links
+    df_links = df[['link_id', 'source', 'destination']].drop_duplicates()
+    for row in df_links.itertuples(index=False):
+        # Convention in this dataset: a device's port is named after the peer device it connects to
+        link_uuid = row.link_id
+        src_device_uuid = row.source
+        dst_device_uuid = row.destination
+        src_port_uuid = row.destination
+        dst_port_uuid = row.source
+
+        if src_device_uuid not in devices:
+            devices[src_device_uuid] = {'id': src_device_uuid, 'endpoints': set()}
+        devices[src_device_uuid]['endpoints'].add(src_port_uuid)
+
+        if dst_device_uuid not in devices:
+            devices[dst_device_uuid] = {'id': dst_device_uuid, 'endpoints': set()}
+        devices[dst_device_uuid]['endpoints'].add(dst_port_uuid)
+
+        if link_uuid not in links:
+            total_capacity_gbps = df[df.link_id==link_uuid]['used_capacity_gbps'].max()
+            total_capacity_gbps = math.ceil(total_capacity_gbps / 100) * 100 # round up in steps of 100
+            used_capacity_gbps  = df[df.link_id==link_uuid]['used_capacity_gbps'].iloc[-1] # latest sample as a scalar (df is time-sorted)
+            links[link_uuid] = {
+                'id': link_uuid,
+                'src_dev': src_device_uuid, 'src_port': src_port_uuid,
+                'dst_dev': dst_device_uuid, 'dst_port': dst_port_uuid,
+                'total_capacity_gbps': total_capacity_gbps, 'used_capacity_gbps': used_capacity_gbps,
+            }
+
+    _context  = json_context('admin', name='admin')
+    _topology = json_topology('admin', name='admin', context_id=_context['context_id'])
+    descriptor = {
+        'contexts': [_context],
+        'topologies': [_topology],
+        'devices': [
+            json_device_emulated_packet_router_disabled(
+                device_uuid, name=device_uuid, config_rules=json_device_emulated_connect_rules([
+                    json_endpoint_descriptor(endpoint_uuid, 'copper', endpoint_name=endpoint_uuid)
+                    for endpoint_uuid in device_data['endpoints']
+                ]))
+            for device_uuid,device_data in devices.items()
+        ],
+        'links': [
+            json_link(link_uuid, [
+                json_endpoint_id(json_device_id(link_data['src_dev']), link_data['src_port']),
+                json_endpoint_id(json_device_id(link_data['dst_dev']), link_data['dst_port']),
+            ], name=link_uuid, total_capacity_gbps=link_data['total_capacity_gbps'],
+            used_capacity_gbps=link_data['used_capacity_gbps'])
+            for link_uuid,link_data in links.items()
+        ],
+    }
+    return descriptor
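Taken together, `read_csv` re-bases the dataset so its last sample lands at the current time, and `compose_descriptors` turns it into the standard TFS descriptor dictionary. A quick illustration of how the two combine (this mirrors what the `scenario` fixture in the test file below does):

```python
# Illustrative usage of the helpers above; paths are relative to src/, as in the tests.
from forecaster.tests.Tools import compose_descriptors, read_csv

df = read_csv('forecaster/tests/data/dataset.csv')
descriptors = compose_descriptors(df)
assert set(descriptors.keys()) == {'contexts', 'topologies', 'devices', 'links'}
print(len(descriptors['devices']), 'devices,', len(descriptors['links']), 'links')
```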
diff --git a/data/forecaster_data/dataset.csv b/src/forecaster/tests/data/dataset.csv
similarity index 100%
rename from data/forecaster_data/dataset.csv
rename to src/forecaster/tests/data/dataset.csv
diff --git a/data/forecaster_data/dataset2.csv b/src/forecaster/tests/data/dataset2.csv
similarity index 100%
rename from data/forecaster_data/dataset2.csv
rename to src/forecaster/tests/data/dataset2.csv
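The renames just co-locate the CSV fixtures with the tests. From the rename logic in `read_csv` above, `dataset.csv` is expected to carry `linkid`/`ds`/`y` columns, with `linkid` formatted as `<source>_<destination>`. A hypothetical miniature stand-in for experimentation, matching that inferred schema:

```python
# Hypothetical three-row stand-in for the dataset.csv schema inferred from read_csv;
# the file name keeps the 'dataset.csv' substring so read_csv applies the same renames.
import pandas

mini = pandas.DataFrame({
    'linkid': ['r1_r2', 'r1_r2', 'r1_r2'],
    'ds'    : ['2023-01-01 00:00', '2023-01-01 00:05', '2023-01-01 00:10'],
    'y'     : [10.0, 12.5, 11.0],
})
mini.to_csv('mini_dataset.csv', index=False)
```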
diff --git a/src/forecaster/tests/test_unitary.py b/src/forecaster/tests/test_unitary.py
index 29ebcb340cdcdc4d63814b55ffc50fdccec0f6dd..8e2c8821a2f4df25492ca750f9bfdb5f2128c2fd 100644
--- a/src/forecaster/tests/test_unitary.py
+++ b/src/forecaster/tests/test_unitary.py
@@ -12,15 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId
+import logging, pandas, pytest
+from typing import Dict, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.proto.context_pb2 import ContextId, TopologyId
 from common.proto.forecaster_pb2 import ForecastLinkCapacityRequest, ForecastTopologyCapacityRequest
 from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from forecaster.client.ForecasterClient import ForecasterClient
-from monitoring.client.MonitoringClient import MonitoringClient
+from forecaster.tests.Tools import compose_descriptors, read_csv
 
 from .PrepareTestScenario import ( # pylint: disable=unused-import
     # be careful, order of symbols is important here!
@@ -29,15 +31,27 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
-DESCRIPTORS_FILE = '' # use dummy descriptor here
+JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID)
+ADMIN_TOPOLOGY_ID = TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=JSON_ADMIN_CONTEXT_ID))
+
+CSV_DATA_FILE = 'forecaster/tests/data/dataset.csv'
+#CSV_DATA_FILE = 'forecaster/tests/data/dataset2.csv'
+
+@pytest.fixture(scope='session')
+def scenario() -> Tuple[pandas.DataFrame, Dict]:
+    df = read_csv(CSV_DATA_FILE)
+    descriptors = compose_descriptors(df)
+    yield df, descriptors
 
 def test_prepare_environment(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
+    scenario : Tuple[pandas.DataFrame, Dict]
 ) -> None:
-    validate_empty_scenario(context_client)
+    _, descriptors = scenario
 
-    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTORS_FILE, context_client=context_client)
+    validate_empty_scenario(context_client)
+    descriptor_loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
     results = descriptor_loader.process()
     check_descriptor_load_results(results, descriptor_loader)
     descriptor_loader.validate()
@@ -51,38 +65,47 @@ def test_forecast_link(
     context_client : ContextClient,
     forecaster_client : ForecasterClient,
 ):  # pylint: disable=redefined-outer-name
-
-    # TODO: select link
-
+    topology = context_client.GetTopology(ADMIN_TOPOLOGY_ID)
+    link_id = topology.link_ids[0]
     forecast_request = ForecastLinkCapacityRequest()
+    forecast_request.link_id.CopyFrom(link_id)                  # pylint: disable=no-member
     forecast_request.forecast_window_seconds = 10 * 24 * 60 * 60 # 10 days in seconds
-    # TODO: populate request
     forecast_reply = forecaster_client.ForecastLinkCapacity(forecast_request)
-    # TODO: validate reply
+    assert forecast_reply.link_id == link_id
+    # TODO: validate forecasted values
 
 def test_forecast_topology(
     context_client : ContextClient,
     forecaster_client : ForecasterClient,
 ):  # pylint: disable=redefined-outer-name
-
-    # TODO: get topology id
-
     forecast_request = ForecastTopologyCapacityRequest()
+    forecast_request.topology_id.CopyFrom(ADMIN_TOPOLOGY_ID)    # pylint: disable=no-member
     forecast_request.forecast_window_seconds = 10 * 24 * 60 * 60 # 10 days in seconds
-    # TODO: populate request
     forecast_reply = forecaster_client.ForecastTopologyCapacity(forecast_request)
-    # TODO: validate reply
+
+    topology = context_client.GetTopology(ADMIN_TOPOLOGY_ID)
+    assert len(forecast_reply.link_capacities) == len(topology.link_ids)
+    reply_link_uuids = {
+        link_capacity.link_id.link_uuid.uuid
+        for link_capacity in forecast_reply.link_capacities
+    }
+    for link_id in topology.link_ids:
+        assert link_id.link_uuid.uuid in reply_link_uuids
+        # TODO: validate forecasted values
 
 def test_cleanup_environment(
     context_client : ContextClient, # pylint: disable=redefined-outer-name
+    scenario : Tuple[pandas.DataFrame, Dict]
 ) -> None:
+    _, descriptors = scenario
+
     # Verify the scenario has no services/slices
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
 
     # Load descriptors and validate the base scenario
-    descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTORS_FILE, context_client=context_client)
+    descriptor_loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
     descriptor_loader.validate()
     descriptor_loader.unload()
     validate_empty_scenario(context_client)
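For the remaining "validate forecasted values" TODOs, one plausible check is that each forecast is non-negative and stays within the link's provisioned capacity. The reply field names below are assumptions inferred from the surrounding test code, not verified against forecaster.proto:

```python
# Hedged sketch of a forecast-value check; `forecast_used_capacity_gbps` and
# `total_capacity_gbps` are assumed field names on the per-link capacity message.
def validate_forecast(link_capacity) -> None:
    assert link_capacity.forecast_used_capacity_gbps >= 0.0
    assert link_capacity.forecast_used_capacity_gbps <= link_capacity.total_capacity_gbps
```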