diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 2e411116766596d90f77e339f03684449780d9ae..a8671ba0d31b0b7bbd1de5da559cb1127a2fa1c3 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -34,6 +34,7 @@ include:
# - local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/ztp/.gitlab-ci.yml'
- local: '/src/policy/.gitlab-ci.yml'
+ - local: '/src/forecaster/.gitlab-ci.yml'
#- local: '/src/webui/.gitlab-ci.yml'
#- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
diff --git a/manifests/forecasterservice.yaml b/manifests/forecasterservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..55d4add88f6fc507e9a4271cb40b20c4742c5bc7
--- /dev/null
+++ b/manifests/forecasterservice.yaml
@@ -0,0 +1,101 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: forecasterservice
+spec:
+ selector:
+ matchLabels:
+ app: forecasterservice
+ #replicas: 1
+ template:
+ metadata:
+ labels:
+ app: forecasterservice
+ spec:
+ terminationGracePeriodSeconds: 5
+ containers:
+ - name: server
+ image: labs.etsi.org:5050/tfs/controller/forecaster:latest
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 10040
+ - containerPort: 9192
+ env:
+ - name: LOG_LEVEL
+ value: "INFO"
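+ # Assumption: the forecaster derives its KPI history window from the requested forecast window multiplied by this ratio.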
+ - name: FORECAST_TO_HISTORY_RATIO
+ value: "10"
+ startupProbe:
+ exec:
+ command: ["/bin/grpc_health_probe", "-addr=:10040"]
+ failureThreshold: 30
+ periodSeconds: 1
+ readinessProbe:
+ exec:
+ command: ["/bin/grpc_health_probe", "-addr=:10040"]
+ livenessProbe:
+ exec:
+ command: ["/bin/grpc_health_probe", "-addr=:10040"]
+ resources:
+ requests:
+ cpu: 250m
+ memory: 128Mi
+ limits:
+ cpu: 1000m
+ memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: forecasterservice
+ labels:
+ app: forecasterservice
+spec:
+ type: ClusterIP
+ selector:
+ app: forecasterservice
+ ports:
+ - name: grpc
+ protocol: TCP
+ port: 10040
+ targetPort: 10040
+ - name: metrics
+ protocol: TCP
+ port: 9192
+ targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: forecasterservice-hpa
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: forecasterservice
+ minReplicas: 1
+ maxReplicas: 20
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 80
+ #behavior:
+ # scaleDown:
+ # stabilizationWindowSeconds: 30
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index c85922d961ecc7b99e8fa2476b5e61db7ed52a9d..87d907a728d0b689dcedde730fad7a2e886a6659 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -37,6 +37,8 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
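+ # Assumption: setting this to "YES" makes PathComp consult the Forecaster service when computing paths.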
+ - name: ENABLE_FORECASTER
+ value: "YES"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10020"]
diff --git a/my_deploy.sh b/my_deploy.sh
index 99e5d40597f458db8546d28b2ef14f0b0d3358e4..525cb20ac3780fbb9f6257d6ed735f580b5fd221 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -40,6 +40,9 @@ export TFS_COMPONENTS="context device pathcomp service slice compute webui load_
# Uncomment to activate TE
#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+# Uncomment to activate Forecaster
+#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
+
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
diff --git a/proto/context.proto b/proto/context.proto
index 22e11bc68b840115a19551958ac322acb71fb9a4..3ccc13ab199ae7587b0c99340c85524f16e86431 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -236,10 +236,16 @@ message LinkId {
Uuid link_uuid = 1;
}
+message LinkAttributes {
+ float total_capacity_gbps = 1;
+ float used_capacity_gbps = 2;
+}
+
message Link {
LinkId link_id = 1;
string name = 2;
repeated EndPointId link_endpoint_ids = 3;
+ LinkAttributes attributes = 4;
}
message LinkIdList {
diff --git a/proto/forecaster.proto b/proto/forecaster.proto
index 5a4403b01c7f85d6d5b33548d0eaf463e39558cc..45cf6967c40831bec5a073b7fabbe25b6b966268 100644
--- a/proto/forecaster.proto
+++ b/proto/forecaster.proto
@@ -18,28 +18,27 @@ package forecaster;
import "context.proto";
service ForecasterService {
- rpc GetForecastOfTopology (context.TopologyId) returns (Forecast) {}
- rpc GetForecastOfLink(context.LinkId) returns (Forecast) {}
- rpc CheckService (context.ServiceId) returns (ForecastPrediction) {}
+ rpc ForecastLinkCapacity (ForecastLinkCapacityRequest ) returns (ForecastLinkCapacityReply ) {}
+ rpc ForecastTopologyCapacity(ForecastTopologyCapacityRequest) returns (ForecastTopologyCapacityReply) {}
}
-message SingleForecast {
- context.Timestamp timestamp= 1;
- double value = 2;
+message ForecastLinkCapacityRequest {
+ context.LinkId link_id = 1;
+ float forecast_window_seconds = 2;
}
-message Forecast {
- oneof uuid {
- context.TopologyId topologyId= 1;
- context.LinkId linkId = 2;
- }
- repeated SingleForecast forecast = 3;
+message ForecastLinkCapacityReply {
+ context.LinkId link_id = 1;
+ float total_capacity_gbps = 2;
+ float current_used_capacity_gbps = 3;
+ float forecast_used_capacity_gbps = 4;
}
-enum AvailabilityPredictionEnum {
- FORECASTED_AVAILABILITY = 0;
- FORECASTED_UNAVAILABILITY = 1;
+message ForecastTopologyCapacityRequest {
+ context.TopologyId topology_id = 1;
+ float forecast_window_seconds = 2;
}
-message ForecastPrediction {
- AvailabilityPredictionEnum prediction = 1;
+
+message ForecastTopologyCapacityReply {
+ repeated ForecastLinkCapacityReply link_capacities = 1;
}
diff --git a/proto/kpi_sample_types.proto b/proto/kpi_sample_types.proto
index 1ade4d69bf5a6c23d993cd37ed731eee10d7374e..5b234a4e35197557f41770984f7c8f6603672411 100644
--- a/proto/kpi_sample_types.proto
+++ b/proto/kpi_sample_types.proto
@@ -17,18 +17,26 @@ package kpi_sample_types;
enum KpiSampleType {
KPISAMPLETYPE_UNKNOWN = 0;
+
KPISAMPLETYPE_PACKETS_TRANSMITTED = 101;
KPISAMPLETYPE_PACKETS_RECEIVED = 102;
KPISAMPLETYPE_PACKETS_DROPPED = 103;
KPISAMPLETYPE_BYTES_TRANSMITTED = 201;
KPISAMPLETYPE_BYTES_RECEIVED = 202;
KPISAMPLETYPE_BYTES_DROPPED = 203;
+
+ KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS = 301;
+ KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS = 302;
+
KPISAMPLETYPE_ML_CONFIDENCE = 401; //. can be used by both optical and L3 without any issue
+
KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; //. can be used by both optical and L3 without any issue
+
KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601;
KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602;
KPISAMPLETYPE_L3_UNIQUE_ATTACKERS = 603;
KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS = 604;
KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO = 605;
+
KPISAMPLETYPE_SERVICE_LATENCY_MS = 701;
}
diff --git a/proto/monitoring.proto b/proto/monitoring.proto
index 3862973e056d6267d8defc68e77cbf3c8a10ebee..45ba48b0271c6e8890d7125ff44f62d2b6da6b58 100644
--- a/proto/monitoring.proto
+++ b/proto/monitoring.proto
@@ -49,6 +49,7 @@ message KpiDescriptor {
context.ServiceId service_id = 7;
context.SliceId slice_id = 8;
context.ConnectionId connection_id = 9;
+ context.LinkId link_id = 10;
}
message MonitorKpiRequest {
diff --git a/scripts/run_tests_locally-forecaster.sh b/scripts/run_tests_locally-forecaster.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e5b9e3e7d249461d6421dd4050890d80757644ab
--- /dev/null
+++ b/scripts/run_tests_locally-forecaster.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze code coverage at the same time
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+ forecaster/tests/test_unitary.py
diff --git a/scripts/show_logs_forecaster.sh b/scripts/show_logs_forecaster.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6bb518fe5120db3620e5b25b3bb70b0483131ea3
--- /dev/null
+++ b/scripts/show_logs_forecaster.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace where TFS is deployed.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/forecasterservice -c server
diff --git a/src/common/Constants.py b/src/common/Constants.py
index 91d3116359ff53e10adba8f4e3d6e8bcc05c2abb..0507cb0caa0d80081d49cffaf281f2f758a25d27 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -57,6 +57,7 @@ class ServiceNameEnum(Enum):
OPTICALATTACKMITIGATOR = 'opticalattackmitigator'
CACHING = 'caching'
TE = 'te'
+ FORECASTER = 'forecaster'
# Used for test and debugging only
DLT_GATEWAY = 'dltgateway'
@@ -82,6 +83,7 @@ DEFAULT_SERVICE_GRPC_PORTS = {
ServiceNameEnum.INTERDOMAIN .value : 10010,
ServiceNameEnum.PATHCOMP .value : 10020,
ServiceNameEnum.TE .value : 10030,
+ ServiceNameEnum.FORECASTER .value : 10040,
# Used for test and debugging only
ServiceNameEnum.DLT_GATEWAY .value : 50051,
diff --git a/src/common/tests/InMemoryObjectDatabase.py b/src/common/tests/InMemoryObjectDatabase.py
new file mode 100644
index 0000000000000000000000000000000000000000..21697a4355795775cc25112671c4e436fbbecb8c
--- /dev/null
+++ b/src/common/tests/InMemoryObjectDatabase.py
@@ -0,0 +1,65 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from typing import Any, Dict, List, Set
+
+LOGGER = logging.getLogger(__name__)
+
+class InMemoryObjectDatabase:
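+ """In-memory object store used by the mock gRPC servicers in unit tests; entries are grouped into named containers and indexed by a UUID string."""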
+ def __init__(self) -> None:
+ self._database : Dict[str, Dict[str, Any]] = dict()
+
+ def _get_container(self, container_name : str) -> Dict[str, Any]:
+ return self._database.setdefault(container_name, {})
+
+ def get_entries(self, container_name : str) -> List[Any]:
+ container = self._get_container(container_name)
+ return [container[entry_uuid] for entry_uuid in sorted(container.keys())]
+
+ def has_entry(self, container_name : str, entry_uuid : str) -> bool:
+ LOGGER.debug('[has_entry] BEFORE database={:s}'.format(str(self._database)))
+ container = self._get_container(container_name)
+ return entry_uuid in container
+
+ def get_entry(self, container_name : str, entry_uuid : str, context : grpc.ServicerContext) -> Any:
+ LOGGER.debug('[get_entry] BEFORE database={:s}'.format(str(self._database)))
+ container = self._get_container(container_name)
+ if entry_uuid not in container:
+ context.abort(grpc.StatusCode.NOT_FOUND, str('{:s}({:s}) not found'.format(container_name, entry_uuid)))
+ return container[entry_uuid]
+
+ def set_entry(self, container_name : str, entry_uuid : str, entry : Any) -> Any:
+ container = self._get_container(container_name)
+ LOGGER.debug('[set_entry] BEFORE database={:s}'.format(str(self._database)))
+ container[entry_uuid] = entry
+ LOGGER.debug('[set_entry] AFTER database={:s}'.format(str(self._database)))
+ return entry
+
+ def del_entry(self, container_name : str, entry_uuid : str, context : grpc.ServicerContext) -> None:
+ container = self._get_container(container_name)
+ LOGGER.debug('[del_entry] BEFORE database={:s}'.format(str(self._database)))
+ if entry_uuid not in container:
+ context.abort(grpc.StatusCode.NOT_FOUND, str('{:s}({:s}) not found'.format(container_name, entry_uuid)))
+ del container[entry_uuid]
+ LOGGER.debug('[del_entry] AFTER database={:s}'.format(str(self._database)))
+
+ def select_entries(self, container_name : str, entry_uuids : Set[str]) -> List[Any]:
+ if len(entry_uuids) == 0: return self.get_entries(container_name)
+ container = self._get_container(container_name)
+ return [
+ container[entry_uuid]
+ for entry_uuid in sorted(container.keys())
+ if entry_uuid in entry_uuids
+ ]
diff --git a/src/common/tests/InMemoryTimeSeriesDatabase.py b/src/common/tests/InMemoryTimeSeriesDatabase.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c4c86da88bfb3ca99ecd92e5baab7244bea414c
--- /dev/null
+++ b/src/common/tests/InMemoryTimeSeriesDatabase.py
@@ -0,0 +1,41 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pandas
+from typing import List, Optional
+
+LOGGER = logging.getLogger(__name__)
+
+class InMemoryTimeSeriesDatabase:
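+ """In-memory KPI time-series store backed by a pandas DataFrame, intended for unit tests."""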
+ def __init__(self) -> None:
+ self._data = pandas.DataFrame(columns=['timestamp', 'kpi_uuid', 'value'])
+
+ def filter(
+ self, kpi_uuids : List[str] = [], start_timestamp : Optional[float] = None,
+ end_timestamp : Optional[float] = None
+ ) -> pandas.DataFrame:
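+ """Return the stored samples, optionally restricted to the given KPI UUIDs and/or to a [start, end] interval expressed as UNIX timestamps in seconds."""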
+ data = self._data
+
+ if len(kpi_uuids) > 0:
+ data = data[data.kpi_uuid.isin(kpi_uuids)]
+
+ if start_timestamp is not None:
+ start_datetime = pandas.to_datetime(start_timestamp, unit='s')
+ data = data[data.timestamp >= start_datetime]
+
+ if end_timestamp is not None:
+ end_datetime = pandas.to_datetime(end_timestamp, unit='s')
+ data = data[data.timestamp <= end_datetime]
+
+ return data
diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py
index e5d8ea76d25a81303df5a8e14073e1dcdc103ef0..55f87b7b0c03a7ae563dc10bd5e4964a07317c21 100644
--- a/src/common/tests/MockServicerImpl_Context.py
+++ b/src/common/tests/MockServicerImpl_Context.py
@@ -13,7 +13,7 @@
# limitations under the License.
import grpc, json, logging
-from typing import Any, Dict, Iterator, List, Set
+from typing import Any, Dict, Iterator, Set, Tuple
from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
from common.proto.context_pb2 import (
Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
@@ -25,226 +25,221 @@ from common.proto.context_pb2 import (
Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
from common.proto.context_pb2_grpc import ContextServiceServicer
-from common.tests.MockMessageBroker import (
+from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
+from .InMemoryObjectDatabase import InMemoryObjectDatabase
+from .MockMessageBroker import (
TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY,
MockMessageBroker, notify_event)
-from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
LOGGER = logging.getLogger(__name__)
-def get_container(database : Dict[str, Dict[str, Any]], container_name : str) -> Dict[str, Any]:
- return database.setdefault(container_name, {})
-
-def get_entries(database : Dict[str, Dict[str, Any]], container_name : str) -> List[Any]:
- container = get_container(database, container_name)
- return [container[entry_uuid] for entry_uuid in sorted(container.keys())]
-
-def has_entry(database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str) -> Any:
- LOGGER.debug('[has_entry] BEFORE database={:s}'.format(str(database)))
- container = get_container(database, container_name)
- return entry_uuid in container
-
-def get_entry(
- context : grpc.ServicerContext, database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str
-) -> Any:
- LOGGER.debug('[get_entry] BEFORE database={:s}'.format(str(database)))
- container = get_container(database, container_name)
- if entry_uuid not in container:
- context.abort(grpc.StatusCode.NOT_FOUND, str('{:s}({:s}) not found'.format(container_name, entry_uuid)))
- return container[entry_uuid]
-
-def set_entry(database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str, entry : Any) -> Any:
- container = get_container(database, container_name)
- LOGGER.debug('[set_entry] BEFORE database={:s}'.format(str(database)))
- container[entry_uuid] = entry
- LOGGER.debug('[set_entry] AFTER database={:s}'.format(str(database)))
- return entry
-
-def del_entry(
- context : grpc.ServicerContext, database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str
-) -> Any:
- container = get_container(database, container_name)
- if entry_uuid not in container:
- context.abort(grpc.StatusCode.NOT_FOUND, str('{:s}({:s}) not found'.format(container_name, entry_uuid)))
- del container[entry_uuid]
- return Empty()
-
-def select_entries(database : Dict[str, Dict[str, Any]], container_name : str, entry_uuids : Set[str]) -> List[Any]:
- if len(entry_uuids) == 0: return get_entries(database, container_name)
- container = get_container(database, container_name)
- return [
- container[entry_uuid]
- for entry_uuid in sorted(container.keys())
- if entry_uuid in entry_uuids
- ]
-
class MockServicerImpl_Context(ContextServiceServicer):
def __init__(self):
- LOGGER.info('[__init__] Creating Servicer...')
- self.database : Dict[str, Dict[str, Any]] = {}
+ LOGGER.debug('[__init__] Creating Servicer...')
+ self.obj_db = InMemoryObjectDatabase()
self.msg_broker = MockMessageBroker()
- LOGGER.info('[__init__] Servicer Created')
+ LOGGER.debug('[__init__] Servicer Created')
# ----- Common -----------------------------------------------------------------------------------------------------
- def _set(self, request, container_name, entry_uuid, entry_id_field_name, topic_name):
- exists = has_entry(self.database, container_name, entry_uuid)
- entry = set_entry(self.database, container_name, entry_uuid, request)
+ def _set(self, request, container_name, entry_uuid, entry_id_field_name, topic_name) -> Tuple[Any, Any]:
+ exists = self.obj_db.has_entry(container_name, entry_uuid)
+ entry = self.obj_db.set_entry(container_name, entry_uuid, request)
event_type = EventTypeEnum.EVENTTYPE_UPDATE if exists else EventTypeEnum.EVENTTYPE_CREATE
entry_id = getattr(entry, entry_id_field_name)
dict_entry_id = grpc_message_to_json(entry_id)
notify_event(self.msg_broker, topic_name, event_type, {entry_id_field_name: dict_entry_id})
- return entry_id
+ return entry_id, entry
- def _del(self, request, container_name, entry_uuid, entry_id_field_name, topic_name, grpc_context):
- empty = del_entry(grpc_context, self.database, container_name, entry_uuid)
+ def _del(self, request, container_name, entry_uuid, entry_id_field_name, topic_name, context) -> Empty:
+ self.obj_db.del_entry(container_name, entry_uuid, context)
event_type = EventTypeEnum.EVENTTYPE_REMOVE
dict_entry_id = grpc_message_to_json(request)
notify_event(self.msg_broker, topic_name, event_type, {entry_id_field_name: dict_entry_id})
- return empty
+ return Empty()
# ----- Context ----------------------------------------------------------------------------------------------------
def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList:
- LOGGER.info('[ListContextIds] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = ContextIdList(context_ids=[context.context_id for context in get_entries(self.database, 'context')])
- LOGGER.info('[ListContextIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListContextIds] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = ContextIdList(context_ids=[context.context_id for context in self.obj_db.get_entries('context')])
+ LOGGER.debug('[ListContextIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList:
- LOGGER.info('[ListContexts] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = ContextList(contexts=get_entries(self.database, 'context'))
- LOGGER.info('[ListContexts] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListContexts] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = ContextList(contexts=self.obj_db.get_entries('context'))
+ LOGGER.debug('[ListContexts] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context:
- LOGGER.info('[GetContext] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = get_entry(context, self.database, 'context', request.context_uuid.uuid)
- LOGGER.info('[GetContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[GetContext] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = self.obj_db.get_entry('context', request.context_uuid.uuid, context)
+ LOGGER.debug('[GetContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId:
- LOGGER.info('[SetContext] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT)
- LOGGER.info('[SetContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[SetContext] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply,_ = self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT)
+ LOGGER.debug('[SetContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[RemoveContext] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[RemoveContext] request={:s}'.format(grpc_message_to_json_string(request)))
reply = self._del(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT, context)
- LOGGER.info('[RemoveContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[RemoveContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
- LOGGER.info('[GetContextEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetContextEvents] request={:s}'.format(grpc_message_to_json_string(request)))
for message in self.msg_broker.consume({TOPIC_CONTEXT}): yield ContextEvent(**json.loads(message.content))
# ----- Topology ---------------------------------------------------------------------------------------------------
def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList:
- LOGGER.info('[ListTopologyIds] request={:s}'.format(grpc_message_to_json_string(request)))
- topologies = get_entries(self.database, 'topology[{:s}]'.format(str(request.context_uuid.uuid)))
+ LOGGER.debug('[ListTopologyIds] request={:s}'.format(grpc_message_to_json_string(request)))
+ topologies = self.obj_db.get_entries('topology[{:s}]'.format(str(request.context_uuid.uuid)))
reply = TopologyIdList(topology_ids=[topology.topology_id for topology in topologies])
- LOGGER.info('[ListTopologyIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListTopologyIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList:
- LOGGER.info('[ListTopologies] request={:s}'.format(grpc_message_to_json_string(request)))
- topologies = get_entries(self.database, 'topology[{:s}]'.format(str(request.context_uuid.uuid)))
+ LOGGER.debug('[ListTopologies] request={:s}'.format(grpc_message_to_json_string(request)))
+ topologies = self.obj_db.get_entries('topology[{:s}]'.format(str(request.context_uuid.uuid)))
reply = TopologyList(topologies=[topology for topology in topologies])
- LOGGER.info('[ListTopologies] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListTopologies] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology:
- LOGGER.info('[GetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid))
- reply = get_entry(context, self.database, container_name, request.topology_uuid.uuid)
- LOGGER.info('[GetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply = self.obj_db.get_entry(container_name, request.topology_uuid.uuid, context)
+ LOGGER.debug('[GetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetTopologyDetails(self, request : TopologyId, context : grpc.ServicerContext) -> TopologyDetails:
- LOGGER.info('[GetTopologyDetails] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetTopologyDetails] request={:s}'.format(grpc_message_to_json_string(request)))
context_uuid = request.context_id.context_uuid.uuid
container_name = 'topology[{:s}]'.format(str(context_uuid))
topology_uuid = request.topology_uuid.uuid
- _reply = get_entry(context, self.database, container_name, topology_uuid)
+ _reply = self.obj_db.get_entry(container_name, topology_uuid, context)
reply = TopologyDetails()
- reply.topology_id.CopyFrom(_reply.topology_id)
+ reply.topology_id.CopyFrom(_reply.topology_id) # pylint: disable=no-member
reply.name = _reply.name
if context_uuid == DEFAULT_CONTEXT_NAME and topology_uuid == DEFAULT_TOPOLOGY_NAME:
- for device in get_entries(self.database, 'device'): reply.devices.append(device)
- for link in get_entries(self.database, 'link'): reply.links.append(link)
+ for device in self.obj_db.get_entries('device'): reply.devices.append(device) # pylint: disable=no-member
+ for link in self.obj_db.get_entries('link' ): reply.links .append(link ) # pylint: disable=no-member
else:
# TODO: to be improved; Mock does not associate devices/links to topologies automatically
for device_id in _reply.device_ids:
- device = get_entry(context, self.database, 'device', device_id.device_uuid.uuid)
- reply.devices.append(device)
+ device = self.obj_db.get_entry('device', device_id.device_uuid.uuid, context)
+ reply.devices.append(device) # pylint: disable=no-member
for link_id in _reply.link_ids:
- link = get_entry(context, self.database, 'link', link_id.link_uuid.uuid)
- reply.links.append(link)
- LOGGER.info('[GetTopologyDetails] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ link = self.obj_db.get_entry('link', link_id.link_uuid.uuid, context)
+ reply.links.append(link) # pylint: disable=no-member
+ LOGGER.debug('[GetTopologyDetails] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
- LOGGER.info('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'topology[{:s}]'.format(str(request.topology_id.context_id.context_uuid.uuid))
topology_uuid = request.topology_id.topology_uuid.uuid
- reply = self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY)
- LOGGER.info('[SetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply,_ = self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY)
+ LOGGER.debug('[SetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[RemoveTopology] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[RemoveTopology] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid))
topology_uuid = request.topology_uuid.uuid
reply = self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context)
- LOGGER.info('[RemoveTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[RemoveTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
- LOGGER.info('[GetTopologyEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetTopologyEvents] request={:s}'.format(grpc_message_to_json_string(request)))
for message in self.msg_broker.consume({TOPIC_TOPOLOGY}): yield TopologyEvent(**json.loads(message.content))
# ----- Device -----------------------------------------------------------------------------------------------------
def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList:
- LOGGER.info('[ListDeviceIds] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = DeviceIdList(device_ids=[device.device_id for device in get_entries(self.database, 'device')])
- LOGGER.info('[ListDeviceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListDeviceIds] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = DeviceIdList(device_ids=[device.device_id for device in self.obj_db.get_entries('device')])
+ LOGGER.debug('[ListDeviceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList:
- LOGGER.info('[ListDevices] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = DeviceList(devices=get_entries(self.database, 'device'))
- LOGGER.info('[ListDevices] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListDevices] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = DeviceList(devices=self.obj_db.get_entries('device'))
+ LOGGER.debug('[ListDevices] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device:
- LOGGER.info('[GetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = get_entry(context, self.database, 'device', request.device_uuid.uuid)
- LOGGER.info('[GetDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[GetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = self.obj_db.get_entry('device', request.device_uuid.uuid, context)
+ LOGGER.debug('[GetDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetDevice(self, request: Context, context : grpc.ServicerContext) -> DeviceId:
- LOGGER.info('[SetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = self._set(request, 'device', request.device_id.device_uuid.uuid, 'device_id', TOPIC_DEVICE)
- LOGGER.info('[SetDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[SetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
+ device_uuid = request.device_id.device_uuid.uuid
+ reply, device = self._set(request, 'device', device_uuid, 'device_id', TOPIC_DEVICE)
+
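+ # Ensure the device is listed in the default topology and in every topology referenced by its endpoints.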
+ context_topology_uuids : Set[Tuple[str, str]] = set()
+ context_topology_uuids.add((DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME))
+ for endpoint in device.device_endpoints:
+ endpoint_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid
+ if len(endpoint_context_uuid) == 0: endpoint_context_uuid = DEFAULT_CONTEXT_NAME
+ endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid
+ if len(endpoint_topology_uuid) == 0: endpoint_topology_uuid = DEFAULT_TOPOLOGY_NAME
+ context_topology_uuids.add((endpoint_context_uuid, endpoint_topology_uuid))
+
+ for context_uuid,topology_uuid in context_topology_uuids:
+ container_name = 'topology[{:s}]'.format(str(context_uuid))
+ topology = self.obj_db.get_entry(container_name, topology_uuid, context)
+ for _device_id in topology.device_ids:
+ if _device_id.device_uuid.uuid == device_uuid: break
+ else:
+ # device not found, add it
+ topology.device_ids.add().device_uuid.uuid = device_uuid
+
+ LOGGER.debug('[SetDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[RemoveDevice] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = self._del(request, 'device', request.device_uuid.uuid, 'device_id', TOPIC_DEVICE, context)
- LOGGER.info('[RemoveDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[RemoveDevice] request={:s}'.format(grpc_message_to_json_string(request)))
+ device_uuid = request.device_uuid.uuid
+ device = self.obj_db.get_entry('device', device_uuid, context)
+ reply = self._del(request, 'device', device_uuid, 'device_id', TOPIC_DEVICE, context)
+
+ context_topology_uuids : Set[Tuple[str, str]] = set()
+ context_topology_uuids.add((DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME))
+ for endpoint in device.device_endpoints:
+ endpoint_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid
+ if len(endpoint_context_uuid) == 0: endpoint_context_uuid = DEFAULT_CONTEXT_NAME
+ endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid
+ if len(endpoint_topology_uuid) == 0: endpoint_topology_uuid = DEFAULT_TOPOLOGY_NAME
+ context_topology_uuids.add((endpoint_context_uuid, endpoint_topology_uuid))
+
+ for context_uuid,topology_uuid in context_topology_uuids:
+ container_name = 'topology[{:s}]'.format(str(context_uuid))
+ topology = self.obj_db.get_entry(container_name, topology_uuid, context)
+ for device_id in topology.device_ids:
+ if device_id.device_uuid.uuid == device_uuid:
+ topology.device_ids.remove(device_id)
+ break
+
+ LOGGER.debug('[RemoveDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
- LOGGER.info('[GetDeviceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetDeviceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
for message in self.msg_broker.consume({TOPIC_DEVICE}): yield DeviceEvent(**json.loads(message.content))
def SelectDevice(self, request : DeviceFilter, context : grpc.ServicerContext) -> DeviceList:
- LOGGER.info('[SelectDevice] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[SelectDevice] request={:s}'.format(grpc_message_to_json_string(request)))
container_entry_uuids : Dict[str, Set[str]] = {}
container_name = 'device'
for device_id in request.device_ids.device_ids:
@@ -258,7 +253,7 @@ class MockServicerImpl_Context(ContextServiceServicer):
devices = list()
for container_name in sorted(container_entry_uuids.keys()):
entry_uuids = container_entry_uuids[container_name]
- for device in select_entries(self.database, container_name, entry_uuids):
+ for device in self.obj_db.select_entries(container_name, entry_uuids):
reply_device = Device()
reply_device.CopyFrom(device)
if exclude_endpoints: del reply_device.device_endpoints [:] # pylint: disable=no-member
@@ -267,92 +262,132 @@ class MockServicerImpl_Context(ContextServiceServicer):
devices.append(reply_device)
reply = DeviceList(devices=devices)
- LOGGER.info('[SelectDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[SelectDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
# ----- Link -------------------------------------------------------------------------------------------------------
def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList:
- LOGGER.info('[ListLinkIds] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = LinkIdList(link_ids=[link.link_id for link in get_entries(self.database, 'link')])
- LOGGER.info('[ListLinkIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListLinkIds] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = LinkIdList(link_ids=[link.link_id for link in self.obj_db.get_entries('link')])
+ LOGGER.debug('[ListLinkIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList:
- LOGGER.info('[ListLinks] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = LinkList(links=get_entries(self.database, 'link'))
- LOGGER.info('[ListLinks] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListLinks] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = LinkList(links=self.obj_db.get_entries('link'))
+ LOGGER.debug('[ListLinks] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link:
- LOGGER.info('[GetLink] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = get_entry(context, self.database, 'link', request.link_uuid.uuid)
- LOGGER.info('[GetLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[GetLink] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = self.obj_db.get_entry('link', request.link_uuid.uuid, context)
+ LOGGER.debug('[GetLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetLink(self, request: Context, context : grpc.ServicerContext) -> LinkId:
- LOGGER.info('[SetLink] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = self._set(request, 'link', request.link_id.link_uuid.uuid, 'link_id', TOPIC_LINK)
- LOGGER.info('[SetLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[SetLink] request={:s}'.format(grpc_message_to_json_string(request)))
+ link_uuid = request.link_id.link_uuid.uuid
+ reply, link = self._set(request, 'link', link_uuid, 'link_id', TOPIC_LINK)
+
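+ # Likewise, register the link in the default topology and in every topology referenced by its endpoints.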
+ context_topology_uuids : Set[Tuple[str, str]] = set()
+ context_topology_uuids.add((DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME))
+ for endpoint_id in link.link_endpoint_ids:
+ endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+ if len(endpoint_context_uuid) == 0: endpoint_context_uuid = DEFAULT_CONTEXT_NAME
+ endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
+ if len(endpoint_topology_uuid) == 0: endpoint_topology_uuid = DEFAULT_TOPOLOGY_NAME
+ context_topology_uuids.add((endpoint_context_uuid, endpoint_topology_uuid))
+
+ for context_uuid,topology_uuid in context_topology_uuids:
+ container_name = 'topology[{:s}]'.format(str(context_uuid))
+ topology = self.obj_db.get_entry(container_name, topology_uuid, context)
+ for _link_id in topology.link_ids:
+ if _link_id.link_uuid.uuid == link_uuid: break
+ else:
+ # link not found, add it
+ topology.link_ids.add().link_uuid.uuid = link_uuid
+
+ LOGGER.debug('[SetLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[RemoveLink] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = self._del(request, 'link', request.link_uuid.uuid, 'link_id', TOPIC_LINK, context)
- LOGGER.info('[RemoveLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[RemoveLink] request={:s}'.format(grpc_message_to_json_string(request)))
+ link_uuid = request.link_uuid.uuid
+ link = self.obj_db.get_entry('link', link_uuid, context)
+ reply = self._del(request, 'link', link_uuid, 'link_id', TOPIC_LINK, context)
+
+ context_topology_uuids : Set[Tuple[str, str]] = set()
+ context_topology_uuids.add((DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME))
+ for endpoint_id in link.link_endpoint_ids:
+ endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+ if len(endpoint_context_uuid) == 0: endpoint_context_uuid = DEFAULT_CONTEXT_NAME
+ endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
+ if len(endpoint_topology_uuid) == 0: endpoint_topology_uuid = DEFAULT_TOPOLOGY_NAME
+ context_topology_uuids.add((endpoint_context_uuid, endpoint_topology_uuid))
+
+ for context_uuid,topology_uuid in context_topology_uuids:
+ container_name = 'topology[{:s}]'.format(str(context_uuid))
+ topology = self.obj_db.get_entry(container_name, topology_uuid, context)
+ for link_id in topology.link_ids:
+ if link_id.link_uuid.uuid == link_uuid:
+ topology.link_ids.remove(link_id)
+ break
+
+ LOGGER.debug('[RemoveLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
- LOGGER.info('[GetLinkEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetLinkEvents] request={:s}'.format(grpc_message_to_json_string(request)))
for message in self.msg_broker.consume({TOPIC_LINK}): yield LinkEvent(**json.loads(message.content))
# ----- Slice ------------------------------------------------------------------------------------------------------
def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList:
- LOGGER.info('[ListSliceIds] request={:s}'.format(grpc_message_to_json_string(request)))
- slices = get_entries(self.database, 'slice[{:s}]'.format(str(request.context_uuid.uuid)))
+ LOGGER.debug('[ListSliceIds] request={:s}'.format(grpc_message_to_json_string(request)))
+ slices = self.obj_db.get_entries('slice[{:s}]'.format(str(request.context_uuid.uuid)))
reply = SliceIdList(slice_ids=[slice.slice_id for slice in slices])
- LOGGER.info('[ListSliceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListSliceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList:
- LOGGER.info('[ListSlices] request={:s}'.format(grpc_message_to_json_string(request)))
- slices = get_entries(self.database, 'slice[{:s}]'.format(str(request.context_uuid.uuid)))
+ LOGGER.debug('[ListSlices] request={:s}'.format(grpc_message_to_json_string(request)))
+ slices = self.obj_db.get_entries('slice[{:s}]'.format(str(request.context_uuid.uuid)))
reply = SliceList(slices=[slice for slice in slices])
- LOGGER.info('[ListSlices] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListSlices] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice:
- LOGGER.info('[GetSlice] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetSlice] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid))
- reply = get_entry(context, self.database, container_name, request.slice_uuid.uuid)
- LOGGER.info('[GetSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply = self.obj_db.get_entry(container_name, request.slice_uuid.uuid, context)
+ LOGGER.debug('[GetSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
- LOGGER.info('[SetSlice] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[SetSlice] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'slice[{:s}]'.format(str(request.slice_id.context_id.context_uuid.uuid))
slice_uuid = request.slice_id.slice_uuid.uuid
- reply = self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE)
- LOGGER.info('[SetSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply,_ = self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE)
+ LOGGER.debug('[SetSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[RemoveSlice] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[RemoveSlice] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid))
slice_uuid = request.slice_uuid.uuid
reply = self._del(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE, context)
- LOGGER.info('[RemoveSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[RemoveSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
- LOGGER.info('[GetSliceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetSliceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
for message in self.msg_broker.consume({TOPIC_SLICE}): yield SliceEvent(**json.loads(message.content))
def SelectSlice(self, request : SliceFilter, context : grpc.ServicerContext) -> SliceList:
- LOGGER.info('[SelectSlice] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[SelectSlice] request={:s}'.format(grpc_message_to_json_string(request)))
container_entry_uuids : Dict[str, Set[str]] = {}
for slice_id in request.slice_ids.slice_ids:
container_name = 'slice[{:s}]'.format(str(slice_id.context_id.context_uuid.uuid))
@@ -368,7 +403,7 @@ class MockServicerImpl_Context(ContextServiceServicer):
slices = list()
for container_name in sorted(container_entry_uuids.keys()):
entry_uuids = container_entry_uuids[container_name]
- for eslice in select_entries(self.database, container_name, entry_uuids):
+ for eslice in self.obj_db.select_entries(container_name, entry_uuids):
reply_slice = Slice()
reply_slice.CopyFrom(eslice)
if exclude_endpoint_ids: del reply_slice.service_endpoint_ids[:] # pylint: disable=no-member
@@ -379,55 +414,55 @@ class MockServicerImpl_Context(ContextServiceServicer):
slices.append(reply_slice)
reply = SliceList(slices=slices)
- LOGGER.info('[SelectSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[SelectSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
# ----- Service ----------------------------------------------------------------------------------------------------
def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList:
- LOGGER.info('[ListServiceIds] request={:s}'.format(grpc_message_to_json_string(request)))
- services = get_entries(self.database, 'service[{:s}]'.format(str(request.context_uuid.uuid)))
+ LOGGER.debug('[ListServiceIds] request={:s}'.format(grpc_message_to_json_string(request)))
+ services = self.obj_db.get_entries('service[{:s}]'.format(str(request.context_uuid.uuid)))
reply = ServiceIdList(service_ids=[service.service_id for service in services])
- LOGGER.info('[ListServiceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListServiceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList:
- LOGGER.info('[ListServices] request={:s}'.format(grpc_message_to_json_string(request)))
- services = get_entries(self.database, 'service[{:s}]'.format(str(request.context_uuid.uuid)))
+ LOGGER.debug('[ListServices] request={:s}'.format(grpc_message_to_json_string(request)))
+ services = self.obj_db.get_entries('service[{:s}]'.format(str(request.context_uuid.uuid)))
reply = ServiceList(services=[service for service in services])
- LOGGER.info('[ListServices] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[ListServices] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service:
- LOGGER.info('[GetService] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetService] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid))
- reply = get_entry(context, self.database, container_name, request.service_uuid.uuid)
- LOGGER.info('[GetService] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply = self.obj_db.get_entry(container_name, request.service_uuid.uuid, context)
+ LOGGER.debug('[GetService] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
- LOGGER.info('[SetService] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[SetService] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid))
service_uuid = request.service_id.service_uuid.uuid
- reply = self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE)
- LOGGER.info('[SetService] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply,_ = self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE)
+ LOGGER.debug('[SetService] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid))
service_uuid = request.service_uuid.uuid
reply = self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context)
- LOGGER.info('[RemoveService] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[RemoveService] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
- LOGGER.info('[GetServiceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetServiceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
for message in self.msg_broker.consume({TOPIC_SERVICE}): yield ServiceEvent(**json.loads(message.content))
def SelectService(self, request : ServiceFilter, context : grpc.ServicerContext) -> ServiceList:
- LOGGER.info('[SelectService] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[SelectService] request={:s}'.format(grpc_message_to_json_string(request)))
container_entry_uuids : Dict[str, Set[str]] = {}
for service_id in request.service_ids.service_ids:
container_name = 'service[{:s}]'.format(str(service_id.context_id.context_uuid.uuid))
@@ -441,7 +476,7 @@ class MockServicerImpl_Context(ContextServiceServicer):
services = list()
for container_name in sorted(container_entry_uuids.keys()):
entry_uuids = container_entry_uuids[container_name]
- for service in select_entries(self.database, container_name, entry_uuids):
+ for service in self.obj_db.select_entries(container_name, entry_uuids):
reply_service = Service()
reply_service.CopyFrom(service)
if exclude_endpoint_ids: del reply_service.service_endpoint_ids[:] # pylint: disable=no-member
@@ -450,54 +485,54 @@ class MockServicerImpl_Context(ContextServiceServicer):
services.append(reply_service)
reply = ServiceList(services=services)
- LOGGER.info('[SelectService] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[SelectService] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
# ----- Connection -------------------------------------------------------------------------------------------------
def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList:
- LOGGER.info('[ListConnectionIds] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[ListConnectionIds] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service_connections[{:s}/{:s}]'.format(
str(request.context_id.context_uuid.uuid), str(request.service_uuid.uuid))
- reply = ConnectionIdList(connection_ids=[c.connection_id for c in get_entries(self.database, container_name)])
- LOGGER.info('[ListConnectionIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply = ConnectionIdList(connection_ids=[c.connection_id for c in self.obj_db.get_entries(container_name)])
+ LOGGER.debug('[ListConnectionIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def ListConnections(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionList:
- LOGGER.info('[ListConnections] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[ListConnections] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service_connections[{:s}/{:s}]'.format(
str(request.context_id.context_uuid.uuid), str(request.service_uuid.uuid))
- reply = ConnectionList(connections=get_entries(self.database, container_name))
- LOGGER.info('[ListConnections] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ reply = ConnectionList(connections=self.obj_db.get_entries(container_name))
+ LOGGER.debug('[ListConnections] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection:
- LOGGER.info('[GetConnection] request={:s}'.format(grpc_message_to_json_string(request)))
- reply = get_entry(context, self.database, 'connection', request.connection_uuid.uuid)
- LOGGER.info('[GetConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[GetConnection] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = self.obj_db.get_entry('connection', request.connection_uuid.uuid, context)
+ LOGGER.debug('[GetConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId:
- LOGGER.info('[SetConnection] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[SetConnection] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service_connection[{:s}/{:s}]'.format(
str(request.service_id.context_id.context_uuid.uuid), str(request.service_id.service_uuid.uuid))
connection_uuid = request.connection_id.connection_uuid.uuid
- set_entry(self.database, container_name, connection_uuid, request)
- reply = self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION)
- LOGGER.info('[SetConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ self.obj_db.set_entry(container_name, connection_uuid, request)
+ reply,_ = self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION)
+ LOGGER.debug('[SetConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[RemoveConnection] request={:s}'.format(grpc_message_to_json_string(request)))
- connection = get_entry(context, self.database, 'connection', request.connection_uuid.uuid)
+ LOGGER.debug('[RemoveConnection] request={:s}'.format(grpc_message_to_json_string(request)))
+ connection = self.obj_db.get_entry('connection', request.connection_uuid.uuid, context)
container_name = 'service_connection[{:s}/{:s}]'.format(
str(connection.service_id.context_id.context_uuid.uuid), str(connection.service_id.service_uuid.uuid))
connection_uuid = request.connection_uuid.uuid
- del_entry(context, self.database, container_name, connection_uuid)
+ self.obj_db.del_entry(container_name, connection_uuid, context)
reply = self._del(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION, context)
- LOGGER.info('[RemoveConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ LOGGER.debug('[RemoveConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
- LOGGER.info('[GetConnectionEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[GetConnectionEvents] request={:s}'.format(grpc_message_to_json_string(request)))
for message in self.msg_broker.consume({TOPIC_CONNECTION}): yield ConnectionEvent(**json.loads(message.content))
diff --git a/src/common/tests/MockServicerImpl_Monitoring.py b/src/common/tests/MockServicerImpl_Monitoring.py
index 7bebf8732dece43fd6c0b5982ea93c70d3ce0bea..4aadb8e5e20575321df2003c69a5ab9fe2390af8 100644
--- a/src/common/tests/MockServicerImpl_Monitoring.py
+++ b/src/common/tests/MockServicerImpl_Monitoring.py
@@ -12,23 +12,107 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-import grpc, logging
+import enum, grpc, logging
from queue import Queue
+from typing import Any, Optional
from common.proto.context_pb2 import Empty
-from common.proto.monitoring_pb2 import Kpi
+from common.proto.monitoring_pb2 import Kpi, KpiDescriptor, KpiDescriptorList, KpiId, KpiQuery, RawKpiTable
from common.proto.monitoring_pb2_grpc import MonitoringServiceServicer
from common.tools.grpc.Tools import grpc_message_to_json_string
+from .InMemoryObjectDatabase import InMemoryObjectDatabase
+from .InMemoryTimeSeriesDatabase import InMemoryTimeSeriesDatabase
LOGGER = logging.getLogger(__name__)
+class IMDB_ContainersEnum(enum.Enum):
+ KPI_DESCRIPTORS = 'kpi_descriptor'
+
class MockServicerImpl_Monitoring(MonitoringServiceServicer):
- def __init__(self, queue_samples : Queue):
- LOGGER.info('[__init__] Creating Servicer...')
+ def __init__(
+ self, queue_samples : Optional[Queue] = None
+ ) -> None:
+ LOGGER.debug('[__init__] Creating Servicer...')
+ if queue_samples is None: queue_samples = Queue()
self.queue_samples = queue_samples
- LOGGER.info('[__init__] Servicer Created')
+ self.obj_db = InMemoryObjectDatabase()
+ self.ts_db = InMemoryTimeSeriesDatabase()
+ LOGGER.debug('[__init__] Servicer Created')
+
+ # ----- Common -----------------------------------------------------------------------------------------------------
+
+ def _set(self, container_name, entry_uuid, entry_id_field_name, entry) -> Any:
+ entry = self.obj_db.set_entry(container_name, entry_uuid, entry)
+ return getattr(entry, entry_id_field_name)
+
+ def _del(self, container_name, entry_uuid, grpc_context) -> Empty:
+ self.obj_db.del_entry(container_name, entry_uuid, grpc_context)
+ return Empty()
+
+ # ----- KPI Descriptor ---------------------------------------------------------------------------------------------
+
+ def GetKpiDescriptorList(self, request : Empty, context : grpc.ServicerContext) -> KpiDescriptorList:
+ LOGGER.debug('[GetKpiDescriptorList] request={:s}'.format(grpc_message_to_json_string(request)))
+ kpi_descriptor_list = self.obj_db.get_entries(IMDB_ContainersEnum.KPI_DESCRIPTORS.value)
+ reply = KpiDescriptorList(kpi_descriptor_list=kpi_descriptor_list)
+ LOGGER.debug('[GetKpiDescriptorList] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ return reply
+
+ def GetKpiDescriptor(self, request : KpiId, context : grpc.ServicerContext) -> KpiDescriptor:
+ LOGGER.debug('[GetKpiDescriptor] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = self.obj_db.get_entry(IMDB_ContainersEnum.KPI_DESCRIPTORS.value, request.kpi_id.uuid, context)
+ LOGGER.debug('[GetKpiDescriptor] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ return reply
+
+ def SetKpi(self, request : KpiDescriptor, context : grpc.ServicerContext) -> KpiId:
+ LOGGER.debug('[SetKpi] request={:s}'.format(grpc_message_to_json_string(request)))
+ reply = self._set(IMDB_ContainersEnum.KPI_DESCRIPTORS.value, request.kpi_id.kpi_id.uuid, 'kpi_id', request)
+ LOGGER.debug('[SetKpi] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ return reply
+
+ def DeleteKpi(self, request : KpiId, context : grpc.ServicerContext) -> Empty:
+ LOGGER.debug('[DeleteKpi] request={:s}'.format(grpc_message_to_json_string(request)))
+        reply = self._del(IMDB_ContainersEnum.KPI_DESCRIPTORS.value, request.kpi_id.uuid, context)
+ LOGGER.debug('[DeleteKpi] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ return reply
+
+ # ----- KPI Sample -------------------------------------------------------------------------------------------------
def IncludeKpi(self, request : Kpi, context : grpc.ServicerContext) -> Empty:
- LOGGER.info('[IncludeKpi] request={:s}'.format(grpc_message_to_json_string(request)))
+ LOGGER.debug('[IncludeKpi] request={:s}'.format(grpc_message_to_json_string(request)))
self.queue_samples.put(request)
- return Empty()
+ reply = Empty()
+ LOGGER.debug('[IncludeKpi] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ return reply
+
+ def QueryKpiData(self, request : KpiQuery, context : grpc.ServicerContext) -> RawKpiTable:
+ LOGGER.debug('[QueryKpiData] request={:s}'.format(grpc_message_to_json_string(request)))
+ # TODO: add filters for request.monitoring_window_s
+ # TODO: add filters for request.last_n_samples
+ kpi_uuids = [kpi_id.kpi_id.uuid for kpi_id in request.kpi_ids]
+
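+        # A start/end timestamp of 0 or less is considered unset and is passed as None (no bound) to the time-series filter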
+ start_timestamp = request.start_timestamp.timestamp
+ if start_timestamp <= 0: start_timestamp = None
+
+ end_timestamp = request.end_timestamp.timestamp
+ if end_timestamp <= 0: end_timestamp = None
+
+ df_samples = self.ts_db.filter(kpi_uuids, start_timestamp=start_timestamp, end_timestamp=end_timestamp)
+ #LOGGER.debug('[QueryKpiData] df_samples={:s}'.format(df_samples.to_string()))
+ reply = RawKpiTable()
+ kpi_uuid__to__raw_kpi_list = dict()
+
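+        # Group the samples by KPI UUID: create one RawKpiList per KPI and append its timestamped values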
+ for df_sample in df_samples.itertuples():
+ kpi_uuid = df_sample.kpi_uuid
+ if kpi_uuid in kpi_uuid__to__raw_kpi_list:
+ raw_kpi_list = kpi_uuid__to__raw_kpi_list[kpi_uuid]
+ else:
+ raw_kpi_list = reply.raw_kpi_lists.add() # pylint: disable=no-member
+ raw_kpi_list.kpi_id.kpi_id.uuid = kpi_uuid
+ kpi_uuid__to__raw_kpi_list[kpi_uuid] = raw_kpi_list
+
+ raw_kpi = raw_kpi_list.raw_kpis.add()
+ raw_kpi.timestamp.timestamp = df_sample.timestamp.timestamp()
+ raw_kpi.kpi_value.floatVal = df_sample.value
+
+ LOGGER.debug('[QueryKpiData] reply={:s}'.format(grpc_message_to_json_string(reply)))
+ return reply
diff --git a/src/common/tools/object_factory/Constraint.py b/src/common/tools/object_factory/Constraint.py
index ef00e3872343196f0a9f8de97d3b1ab6fc12d847..9fccd9d5f97d64cac2dea441bbbb374d638df114 100644
--- a/src/common/tools/object_factory/Constraint.py
+++ b/src/common/tools/object_factory/Constraint.py
@@ -19,6 +19,9 @@ def json_constraint_custom(constraint_type : str, constraint_value : Union[str,
if not isinstance(constraint_value, str): constraint_value = json.dumps(constraint_value, sort_keys=True)
return {'custom': {'constraint_type': constraint_type, 'constraint_value': constraint_value}}
+def json_constraint_schedule(start_timestamp : float, duration_days : float) -> Dict:
+ return {'schedule': {'start_timestamp': start_timestamp, 'duration_days': duration_days}}
+
def json_constraint_endpoint_location_region(endpoint_id : Dict, region : str) -> Dict:
return {'endpoint_location': {'endpoint_id': endpoint_id, 'location': {'region': region}}}
@@ -29,16 +32,27 @@ def json_constraint_endpoint_location_gps(endpoint_id : Dict, latitude : float,
def json_constraint_endpoint_priority(endpoint_id : Dict, priority : int) -> Dict:
return {'endpoint_priority': {'endpoint_id': endpoint_id, 'priority': priority}}
+def json_constraint_sla_capacity(capacity_gbps : float) -> Dict:
+ return {'sla_capacity': {'capacity_gbps': capacity_gbps}}
+
+def json_constraint_sla_latency(e2e_latency_ms : float) -> Dict:
+ return {'sla_latency': {'e2e_latency_ms': e2e_latency_ms}}
+
def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool, availability : float) -> Dict:
return {'sla_availability': {
'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active, 'availability': availability
}}
-def json_constraint_sla_capacity(capacity_gbps : float) -> Dict:
- return {'sla_capacity': {'capacity_gbps': capacity_gbps}}
-
def json_constraint_sla_isolation(isolation_levels : List[int]) -> Dict:
return {'sla_isolation': {'isolation_level': isolation_levels}}
-def json_constraint_sla_latency(e2e_latency_ms : float) -> Dict:
- return {'sla_latency': {'e2e_latency_ms': e2e_latency_ms}}
+def json_constraint_exclusions(
+ is_permanent : bool = False, device_ids : List[Dict] = [], endpoint_ids : List[Dict] = [],
+ link_ids : List[Dict] = []
+) -> Dict:
+ return {'exclusions': {
+ 'is_permanent' : is_permanent,
+ 'device_ids' : device_ids,
+ 'endpoint_ids' : endpoint_ids,
+ 'link_ids' : link_ids,
+ }}
diff --git a/src/common/tools/object_factory/Link.py b/src/common/tools/object_factory/Link.py
index 5f8080d300d9d6d646b8d769ec5819b0bd26f789..c0a4c48d1beea64e6591e47441509fa2cc42c02b 100644
--- a/src/common/tools/object_factory/Link.py
+++ b/src/common/tools/object_factory/Link.py
@@ -23,13 +23,28 @@ def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str:
def json_link_id(link_uuid : str) -> Dict:
return {'link_uuid': {'uuid': link_uuid}}
-def json_link(link_uuid : str, endpoint_ids : List[Dict], name : Optional[str] = None) -> Dict:
+def json_link(
+ link_uuid : str, endpoint_ids : List[Dict], name : Optional[str] = None,
+ total_capacity_gbps : Optional[float] = None, used_capacity_gbps : Optional[float] = None
+) -> Dict:
result = {'link_id': json_link_id(link_uuid), 'link_endpoint_ids': copy.deepcopy(endpoint_ids)}
if name is not None: result['name'] = name
+ if total_capacity_gbps is not None:
+ attributes : Dict = result.setdefault('attributes', dict())
+ attributes.setdefault('total_capacity_gbps', total_capacity_gbps)
+ if used_capacity_gbps is not None:
+ attributes : Dict = result.setdefault('attributes', dict())
+ attributes.setdefault('used_capacity_gbps', used_capacity_gbps)
return result
-def compose_link(endpoint_a, endpoint_z) -> Tuple[Dict, Dict]:
+def compose_link(
+ endpoint_a : Dict, endpoint_z : Dict, name : Optional[str] = None,
+ total_capacity_gbps : Optional[float] = None, used_capacity_gbps : Optional[float] = None
+) -> Tuple[Dict, Dict]:
link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
link_id = json_link_id(link_uuid)
- link = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']])
+ link = json_link(
+ link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']], name=name,
+ total_capacity_gbps=total_capacity_gbps, used_capacity_gbps=used_capacity_gbps
+ )
return link_id, link
diff --git a/src/common/tools/timestamp/Converters.py b/src/common/tools/timestamp/Converters.py
index 0ef8e0863b71b610602dfc0ee4fc7c72d25a1139..7918017390e60bd7830d3513216fc0b8f6cf83ef 100644
--- a/src/common/tools/timestamp/Converters.py
+++ b/src/common/tools/timestamp/Converters.py
@@ -13,14 +13,23 @@
# limitations under the License.
-import dateutil.parser
+import dateutil.parser, math
from datetime import datetime, timezone
+def timestamp_datetime_to_float(dt_timestamp : datetime) -> float:
+ return math.floor(dt_timestamp.timestamp())
+
+def timestamp_datetime_to_int(dt_timestamp : datetime) -> int:
+ return math.floor(timestamp_datetime_to_float(dt_timestamp))
+
def timestamp_string_to_float(str_timestamp : str) -> float:
- return datetime.timestamp(dateutil.parser.isoparse(str_timestamp))
+ return timestamp_datetime_to_float(dateutil.parser.isoparse(str_timestamp))
def timestamp_float_to_string(flt_timestamp : float) -> str:
return datetime.utcfromtimestamp(flt_timestamp).isoformat() + 'Z'
+def timestamp_utcnow_to_datetime() -> datetime:
+ return datetime.now(tz=timezone.utc)
+
def timestamp_utcnow_to_float() -> float:
- return datetime.timestamp(datetime.now(tz=timezone.utc))
+ return timestamp_datetime_to_float(timestamp_utcnow_to_datetime())
diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py
index 42ea864f3c0c1150c3806f97e67ff3969542ab70..286ae179d325b6e70d6ebf509de92e354ba42bc8 100644
--- a/src/common/type_checkers/Assertions.py
+++ b/src/common/type_checkers/Assertions.py
@@ -53,6 +53,8 @@ def validate_kpi_sample_types_enum(message):
'KPISAMPLETYPE_PACKETS_RECEIVED',
'KPISAMPLETYPE_BYTES_TRANSMITTED',
'KPISAMPLETYPE_BYTES_RECEIVED',
+ 'KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS',
+ 'KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS',
]
def validate_service_type_enum(message):
diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml
index 63fc2d94307f556140e15e984789b9495f2d8270..5de4bc1fcbb5bea98a7675253efe060df03a1237 100644
--- a/src/context/.gitlab-ci.yml
+++ b/src/context/.gitlab-ci.yml
@@ -53,6 +53,7 @@ unit_test context:
- if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
- if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+ - docker container prune -f
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker pull "cockroachdb/cockroach:latest-v22.2"
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
index 67ac9f518f610caedc631444187cac10aded56c7..4ca3cee68e45cc9b5a8f4e0d9f1b07a3ec39f268 100644
--- a/src/context/service/database/Link.py
+++ b/src/context/service/database/Link.py
@@ -99,11 +99,25 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link)
})
topology_uuids.add(endpoint_topology_uuid)
+ total_capacity_gbps, used_capacity_gbps = None, None
+ if request.HasField('attributes'):
+ attributes = request.attributes
+ # In proto3, HasField() does not work for scalar fields, using ListFields() instead.
+ attribute_names = set([field.name for field,_ in attributes.ListFields()])
+ if 'total_capacity_gbps' in attribute_names:
+ total_capacity_gbps = attributes.total_capacity_gbps
+ if 'used_capacity_gbps' in attribute_names:
+ used_capacity_gbps = attributes.used_capacity_gbps
+ elif total_capacity_gbps is not None:
+ used_capacity_gbps = total_capacity_gbps
+
link_data = [{
- 'link_uuid' : link_uuid,
- 'link_name' : link_name,
- 'created_at': now,
- 'updated_at': now,
+ 'link_uuid' : link_uuid,
+ 'link_name' : link_name,
+ 'total_capacity_gbps' : total_capacity_gbps,
+ 'used_capacity_gbps' : used_capacity_gbps,
+ 'created_at' : now,
+ 'updated_at' : now,
}]
def callback(session : Session) -> Tuple[bool, List[Dict]]:
@@ -111,8 +125,10 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link)
stmt = stmt.on_conflict_do_update(
index_elements=[LinkModel.link_uuid],
set_=dict(
- link_name = stmt.excluded.link_name,
- updated_at = stmt.excluded.updated_at,
+ link_name = stmt.excluded.link_name,
+ total_capacity_gbps = stmt.excluded.total_capacity_gbps,
+ used_capacity_gbps = stmt.excluded.used_capacity_gbps,
+ updated_at = stmt.excluded.updated_at,
)
)
stmt = stmt.returning(LinkModel.created_at, LinkModel.updated_at)
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
index 9c16da3c9146f28352e8b4f7a6f9ab85f870c8b7..d91666652e6b7e506b9718903d0fb095b4ea69c4 100644
--- a/src/context/service/database/models/LinkModel.py
+++ b/src/context/service/database/models/LinkModel.py
@@ -13,7 +13,7 @@
# limitations under the License.
import operator
-from sqlalchemy import CheckConstraint, Column, DateTime, ForeignKey, Integer, String
+from sqlalchemy import CheckConstraint, Column, DateTime, Float, ForeignKey, Integer, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from typing import Dict
@@ -22,19 +22,26 @@ from ._Base import _Base
class LinkModel(_Base):
__tablename__ = 'link'
- link_uuid = Column(UUID(as_uuid=False), primary_key=True)
- link_name = Column(String, nullable=False)
- created_at = Column(DateTime, nullable=False)
- updated_at = Column(DateTime, nullable=False)
+ link_uuid = Column(UUID(as_uuid=False), primary_key=True)
+ link_name = Column(String, nullable=False)
+ total_capacity_gbps = Column(Float, nullable=True)
+ used_capacity_gbps = Column(Float, nullable=True)
+ created_at = Column(DateTime, nullable=False)
+ updated_at = Column(DateTime, nullable=False)
#topology_links = relationship('TopologyLinkModel', back_populates='link')
link_endpoints = relationship('LinkEndPointModel') # lazy='joined', back_populates='link'
+ __table_args__ = (
+ CheckConstraint(total_capacity_gbps >= 0, name='check_value_total_capacity_gbps'),
+ CheckConstraint(used_capacity_gbps >= 0, name='check_value_used_capacity_gbps' ),
+ )
+
def dump_id(self) -> Dict:
return {'link_uuid': {'uuid': self.link_uuid}}
def dump(self) -> Dict:
- return {
+ result = {
'link_id' : self.dump_id(),
'name' : self.link_name,
'link_endpoint_ids': [
@@ -42,6 +49,13 @@ class LinkModel(_Base):
for link_endpoint in sorted(self.link_endpoints, key=operator.attrgetter('position'))
],
}
+ if self.total_capacity_gbps is not None:
+ attributes : Dict = result.setdefault('attributes', dict())
+ attributes.setdefault('total_capacity_gbps', self.total_capacity_gbps)
+ if self.used_capacity_gbps is not None:
+ attributes : Dict = result.setdefault('attributes', dict())
+ attributes.setdefault('used_capacity_gbps', self.used_capacity_gbps)
+ return result
class LinkEndPointModel(_Base):
__tablename__ = 'link_endpoint'
diff --git a/src/context/service/database/models/enums/KpiSampleType.py b/src/context/service/database/models/enums/KpiSampleType.py
index 5cef9ac199a0cc3389092e4ea375940e27554066..a229b5698ecc393afced41f885bf4c88ede4543f 100644
--- a/src/context/service/database/models/enums/KpiSampleType.py
+++ b/src/context/service/database/models/enums/KpiSampleType.py
@@ -22,11 +22,13 @@ from ._GrpcToEnum import grpc_to_enum
# BYTES_RECEIVED. If item name does not match, automatic mapping of
# proto enums to database enums will fail.
class ORM_KpiSampleTypeEnum(enum.Enum):
- UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN
- PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
- PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
- BYTES_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED
- BYTES_RECEIVED = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED
+ UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN
+ PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
+ PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+ BYTES_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED
+ BYTES_RECEIVED = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED
+ LINK_TOTAL_CAPACITY_GBPS = KpiSampleType.KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS
+ LINK_USED_CAPACITY_GBPS = KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS
grpc_to_enum__kpi_sample_type = functools.partial(
grpc_to_enum, KpiSampleType, ORM_KpiSampleTypeEnum)
diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py
index 6b52ef4c0f3583de628706ba79efffb9d5709820..785a50e1934269150381a1d6d3b08001574a0cee 100644
--- a/src/context/tests/Objects.py
+++ b/src/context/tests/Objects.py
@@ -71,18 +71,32 @@ DEVICE_R3_NAME, DEVICE_R3_ID, DEVICE_R3 = compose_device('R3', ['1.1', '1.2', '2
# ----- Link -----------------------------------------------------------------------------------------------------------
-def compose_link(name : str, endpoint_ids : List[Tuple[str, str]]) -> Tuple[str, Dict, Dict]:
+def compose_link(
+ name : str, endpoint_ids : List[Tuple[str, str]],
+ total_capacity_gbps : Optional[float] = None, used_capacity_gbps : Optional[float] = None
+) -> Tuple[str, Dict, Dict]:
link_id = json_link_id(name)
endpoint_ids = [
json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID)
for device_id, endpoint_name in endpoint_ids
]
- link = json_link(name, endpoint_ids)
+ link = json_link(
+ name, endpoint_ids, total_capacity_gbps=total_capacity_gbps, used_capacity_gbps=used_capacity_gbps
+ )
return name, link_id, link
-LINK_R1_R2_NAME, LINK_R1_R2_ID, LINK_R1_R2 = compose_link('R1==R2', [(DEVICE_R1_ID, '1.2'), (DEVICE_R2_ID, '1.1')])
-LINK_R2_R3_NAME, LINK_R2_R3_ID, LINK_R2_R3 = compose_link('R2==R3', [(DEVICE_R2_ID, '1.3'), (DEVICE_R3_ID, '1.2')])
-LINK_R1_R3_NAME, LINK_R1_R3_ID, LINK_R1_R3 = compose_link('R1==R3', [(DEVICE_R1_ID, '1.3'), (DEVICE_R3_ID, '1.1')])
+LINK_R1_R2_NAME, LINK_R1_R2_ID, LINK_R1_R2 = compose_link(
+ 'R1==R2', [(DEVICE_R1_ID, '1.2'), (DEVICE_R2_ID, '1.1')],
+ total_capacity_gbps=100, # used_capacity_gbps=None => used_capacity_gbps=total_capacity_gbps
+)
+LINK_R2_R3_NAME, LINK_R2_R3_ID, LINK_R2_R3 = compose_link(
+ 'R2==R3', [(DEVICE_R2_ID, '1.3'), (DEVICE_R3_ID, '1.2')],
+ total_capacity_gbps=100, # used_capacity_gbps=None => used_capacity_gbps=total_capacity_gbps
+)
+LINK_R1_R3_NAME, LINK_R1_R3_ID, LINK_R1_R3 = compose_link(
+ 'R1==R3', [(DEVICE_R1_ID, '1.3'), (DEVICE_R3_ID, '1.1')],
+ total_capacity_gbps=100, # used_capacity_gbps=None => used_capacity_gbps=total_capacity_gbps
+)
# ----- Service --------------------------------------------------------------------------------------------------------
diff --git a/src/context/tests/test_link.py b/src/context/tests/test_link.py
index 894ef8ef1472e4b451314970883cb9467c63b02b..8b07f0230cc12add4ab0f2db78f3663cb021ca3a 100644
--- a/src/context/tests/test_link.py
+++ b/src/context/tests/test_link.py
@@ -95,6 +95,13 @@ def test_link(context_client : ContextClient) -> None:
assert response.link_id.link_uuid.uuid == link_uuid
assert response.name == LINK_R1_R2_NAME
assert len(response.link_endpoint_ids) == 2
+ assert response.HasField('attributes')
+ # In proto3, HasField() does not work for scalar fields, using ListFields() instead.
+ attribute_names = set([field.name for field,_ in response.attributes.ListFields()])
+ assert 'total_capacity_gbps' in attribute_names
+ assert abs(response.attributes.total_capacity_gbps - 100) < 1.e-12
+ assert 'used_capacity_gbps' in attribute_names
+ assert abs(response.attributes.used_capacity_gbps - response.attributes.total_capacity_gbps) < 1.e-12
# ----- List when the object exists --------------------------------------------------------------------------------
response = context_client.ListLinkIds(Empty())
@@ -111,6 +118,8 @@ def test_link(context_client : ContextClient) -> None:
new_link_name = 'new'
LINK_UPDATED = copy.deepcopy(LINK_R1_R2)
LINK_UPDATED['name'] = new_link_name
+ LINK_UPDATED['attributes']['total_capacity_gbps'] = 200
+ LINK_UPDATED['attributes']['used_capacity_gbps'] = 50
response = context_client.SetLink(Link(**LINK_UPDATED))
assert response.link_uuid.uuid == link_uuid
@@ -125,6 +134,13 @@ def test_link(context_client : ContextClient) -> None:
assert response.link_id.link_uuid.uuid == link_uuid
assert response.name == new_link_name
assert len(response.link_endpoint_ids) == 2
+ assert response.HasField('attributes')
+ # In proto3, HasField() does not work for scalar fields, using ListFields() instead.
+ attribute_names = set([field.name for field,_ in response.attributes.ListFields()])
+ assert 'total_capacity_gbps' in attribute_names
+ assert abs(response.attributes.total_capacity_gbps - 200) < 1.e-12
+ assert 'used_capacity_gbps' in attribute_names
+ assert abs(response.attributes.used_capacity_gbps - 50) < 1.e-12
# ----- List when the object is modified ---------------------------------------------------------------------------
response = context_client.ListLinkIds(Empty())
@@ -136,6 +152,14 @@ def test_link(context_client : ContextClient) -> None:
assert response.links[0].link_id.link_uuid.uuid == link_uuid
assert response.links[0].name == new_link_name
assert len(response.links[0].link_endpoint_ids) == 2
+ assert response.links[0].HasField('attributes')
+ # In proto3, HasField() does not work for scalar fields, using ListFields() instead.
+ attribute_names = set([field.name for field,_ in response.links[0].attributes.ListFields()])
+ assert 'total_capacity_gbps' in attribute_names
+ assert abs(response.links[0].attributes.total_capacity_gbps - 200) < 1.e-12
+ assert 'used_capacity_gbps' in attribute_names
+ assert abs(response.links[0].attributes.used_capacity_gbps - 50) < 1.e-12
# ----- Check relation was created ---------------------------------------------------------------------------------
response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID))
diff --git a/src/device/requirements.in b/src/device/requirements.in
index c81e814603d4c84e0211e3b433fc916b616ecd04..ece761571ec2ff9c3376b1062787d76047d71e7c 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -20,6 +20,7 @@ cryptography==36.0.2
Jinja2==3.0.3
ncclient==0.6.13
p4runtime==1.3.0
+pandas==1.5.*
paramiko==2.9.2
python-json-logger==2.0.2
#pytz==2021.3
diff --git a/src/forecaster/.gitlab-ci.yml b/src/forecaster/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..09b2f8f4e67db6ce152da608baff6f51279a1dc8
--- /dev/null
+++ b/src/forecaster/.gitlab-ci.yml
@@ -0,0 +1,107 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build forecaster:
+ variables:
+ IMAGE_NAME: 'forecaster' # name of the microservice
+ IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+ stage: build
+ before_script:
+ - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+ script:
+ - docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+ - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+ - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+ after_script:
+ - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+ - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+ - changes:
+ - src/common/**/*.py
+ - proto/*.proto
+ - src/$IMAGE_NAME/**/*.{py,in,yml}
+ - src/$IMAGE_NAME/Dockerfile
+ - src/$IMAGE_NAME/tests/*.py
+ - manifests/${IMAGE_NAME}service.yaml
+ - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test forecaster:
+ variables:
+ IMAGE_NAME: 'forecaster' # name of the microservice
+ IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+ stage: unit_test
+ needs:
+ - build forecaster
+ before_script:
+ - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+ - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+ - docker container prune -f
+ script:
+ - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+ - docker run --name $IMAGE_NAME -d -p 10040:10040 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+ - sleep 5
+ - docker ps -a
+ - docker logs $IMAGE_NAME
+ - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
+ - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+ coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+ after_script:
+ - docker rm -f $IMAGE_NAME
+ - docker network rm teraflowbridge
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+ - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+ - changes:
+ - src/common/**/*.py
+ - proto/*.proto
+ - src/$IMAGE_NAME/**/*.{py,in,yml}
+ - src/$IMAGE_NAME/Dockerfile
+ - src/$IMAGE_NAME/tests/*.py
+ - src/$IMAGE_NAME/tests/Dockerfile
+ - manifests/${IMAGE_NAME}service.yaml
+ - .gitlab-ci.yml
+ artifacts:
+ when: always
+ reports:
+ junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy forecaster:
+# variables:
+# IMAGE_NAME: 'forecaster' # name of the microservice
+# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+# stage: deploy
+# needs:
+#    - unit_test forecaster
+# # - integ_test execute
+# script:
+# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+# - kubectl version
+# - kubectl get all
+# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+# - kubectl get all
+# # environment:
+# # name: test
+# # url: https://example.com
+# # kubernetes:
+# # namespace: test
+# rules:
+# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+# when: manual
+# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+# when: manual
diff --git a/src/forecaster/Config.py b/src/forecaster/Config.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d80b8fe62ff2e8313ec6a7b1b0278fea7c16950
--- /dev/null
+++ b/src/forecaster/Config.py
@@ -0,0 +1,21 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+# FORECAST_TO_HISTORY_RATIO indicates the size of the training set relative to the forecast window.
+# For example, a ratio of 10 implies that the training set (historical samples) will be 10 times
+# bigger than the forecast period, which is also used as the test set.
+DEFAULT_FORECAST_TO_HISTORY_RATIO = 10
+FORECAST_TO_HISTORY_RATIO = int(os.environ.get('FORECAST_TO_HISTORY_RATIO', DEFAULT_FORECAST_TO_HISTORY_RATIO))
diff --git a/src/forecaster/Dockerfile b/src/forecaster/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..c09ab8007396895df0a9e4a07cf5984b9d662b3e
--- /dev/null
+++ b/src/forecaster/Dockerfile
@@ -0,0 +1,78 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+ apt-get --yes --quiet --quiet install wget g++ git && \
+ rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+ wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+ chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
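+# Rewrite the generated absolute imports into relative imports so the compiled proto modules can resolve each other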
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/forecaster
+WORKDIR /var/teraflow/forecaster
+COPY src/forecaster/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/context/__init__.py context/__init__.py
+COPY src/context/client/. context/client/
+COPY src/device/__init__.py device/__init__.py
+COPY src/device/client/. device/client/
+COPY src/monitoring/__init__.py monitoring/__init__.py
+COPY src/monitoring/client/. monitoring/client/
+COPY src/service/__init__.py service/__init__.py
+COPY src/service/client/. service/client/
+COPY src/slice/__init__.py slice/__init__.py
+COPY src/slice/client/. slice/client/
+COPY src/forecaster/. forecaster/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "forecaster.service"]
diff --git a/src/forecaster/__init__.py b/src/forecaster/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..38d04994fb0fa1951fb465bc127eb72659dc2eaf
--- /dev/null
+++ b/src/forecaster/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/forecaster/client/ForecasterClient.py b/src/forecaster/client/ForecasterClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..17e0beb339f5dbe748a211c4286a98a376ed6084
--- /dev/null
+++ b/src/forecaster/client/ForecasterClient.py
@@ -0,0 +1,63 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.forecaster_pb2 import (
+ ForecastLinkCapacityReply, ForecastLinkCapacityRequest,
+ ForecastTopologyCapacityReply, ForecastTopologyCapacityRequest
+)
+from common.proto.forecaster_pb2_grpc import ForecasterServiceStub
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class ForecasterClient:
+ def __init__(self, host=None, port=None):
+ if not host: host = get_service_host(ServiceNameEnum.FORECASTER)
+ if not port: port = get_service_port_grpc(ServiceNameEnum.FORECASTER)
+ self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+ LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+ self.channel = None
+ self.stub = None
+ self.connect()
+ LOGGER.debug('Channel created')
+
+ def connect(self):
+ self.channel = grpc.insecure_channel(self.endpoint)
+ self.stub = ForecasterServiceStub(self.channel)
+
+ def close(self):
+ if self.channel is not None: self.channel.close()
+ self.channel = None
+ self.stub = None
+
+ @RETRY_DECORATOR
+ def ForecastLinkCapacity(self, request : ForecastLinkCapacityRequest) -> ForecastLinkCapacityReply:
+ LOGGER.debug('ForecastLinkCapacity request: {:s}'.format(grpc_message_to_json_string(request)))
+ response = self.stub.ForecastLinkCapacity(request)
+ LOGGER.debug('ForecastLinkCapacity result: {:s}'.format(grpc_message_to_json_string(response)))
+ return response
+
+ @RETRY_DECORATOR
+ def ForecastTopologyCapacity(self, request : ForecastTopologyCapacityRequest) -> ForecastTopologyCapacityReply:
+ LOGGER.debug('ForecastTopologyCapacity request: {:s}'.format(grpc_message_to_json_string(request)))
+ response = self.stub.ForecastTopologyCapacity(request)
+ LOGGER.debug('ForecastTopologyCapacity result: {:s}'.format(grpc_message_to_json_string(response)))
+ return response
diff --git a/src/forecaster/client/__init__.py b/src/forecaster/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/forecaster/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/forecaster/requirements.in b/src/forecaster/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..3ed37c5998a550427f987d881e5ce4455b5e1649
--- /dev/null
+++ b/src/forecaster/requirements.in
@@ -0,0 +1,18 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#numpy==1.23.*
+pandas==1.5.*
+#prophet==1.1.*
+scikit-learn==1.1.*
diff --git a/src/forecaster/service/Forecaster.py b/src/forecaster/service/Forecaster.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2b5b4d09af35752ffa221eabaf40b1d22515d32
--- /dev/null
+++ b/src/forecaster/service/Forecaster.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math, pandas
+from datetime import datetime, timezone
+from statistics import mean
+from sklearn.ensemble import RandomForestRegressor
+from common.proto.monitoring_pb2 import KpiId
+from forecaster.Config import FORECAST_TO_HISTORY_RATIO
+
+def compute_forecast(samples : pandas.DataFrame, kpi_id : KpiId) -> float:
+ kpi_uuid = kpi_id.kpi_id.uuid
+ samples = samples[samples.kpi_id == kpi_uuid].copy()
+
+ num_samples = samples.shape[0]
+ if num_samples <= 0:
+ MSG = 'KpiId({:s}): Wrong number of samples: {:d}'
+ raise Exception(MSG.format(kpi_uuid, num_samples))
+
+ num_samples_test = math.ceil(num_samples / FORECAST_TO_HISTORY_RATIO)
+ if num_samples_test <= 0:
+ MSG = 'KpiId({:s}): Wrong number of test samples: {:d}'
+ raise Exception(MSG.format(kpi_uuid, num_samples_test ))
+
+ num_samples_train = num_samples - num_samples_test
+ if num_samples_train <= 0:
+ MSG = 'KpiId({:s}): Wrong number of train samples: {:d}'
+ raise Exception(MSG.format(kpi_uuid, num_samples_train))
+
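+    # Convert timestamps to seconds since the Unix epoch so they can be used as the numeric regression feature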
+ samples['timestamp'] = pandas.to_datetime(samples['timestamp']) - datetime(1970, 1, 1, tzinfo=timezone.utc)
+ samples['timestamp'] = samples['timestamp'].dt.total_seconds()
+
+ train_set = samples[0:num_samples_train]
+ test_set = samples[num_samples_train:num_samples]
+
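+    # Fit a Random Forest regressor on the historical (train) window, predict the most recent (test) window,
+    # and report the forecast as the rounded average of the predicted values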
+ rfr = RandomForestRegressor(n_estimators=600, random_state=42)
+ rfr.fit(train_set.drop(['kpi_id', 'value'], axis=1), train_set['value'])
+ forecast = rfr.predict(test_set.drop(['kpi_id', 'value'], axis=1))
+ avg_forecast = round(mean(forecast), 2)
+ return avg_forecast
diff --git a/src/forecaster/service/ForecasterService.py b/src/forecaster/service/ForecasterService.py
new file mode 100644
index 0000000000000000000000000000000000000000..944ceb01e1429df4e124d28993cf001bb683aeb5
--- /dev/null
+++ b/src/forecaster/service/ForecasterService.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.proto.forecaster_pb2_grpc import add_ForecasterServiceServicer_to_server
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from .ForecasterServiceServicerImpl import ForecasterServiceServicerImpl
+
+class ForecasterService(GenericGrpcService):
+ def __init__(self, cls_name: str = __name__) -> None:
+ port = get_service_port_grpc(ServiceNameEnum.FORECASTER)
+ super().__init__(port, cls_name=cls_name)
+ self.forecaster_servicer = ForecasterServiceServicerImpl()
+
+ def install_servicers(self):
+ add_ForecasterServiceServicer_to_server(self.forecaster_servicer, self.server)
diff --git a/src/forecaster/service/ForecasterServiceServicerImpl.py b/src/forecaster/service/ForecasterServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..41f6a59fd1e99f1ca336b65139eb9399d1aeec1a
--- /dev/null
+++ b/src/forecaster/service/ForecasterServiceServicerImpl.py
@@ -0,0 +1,126 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List
+import grpc, logging
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.proto.context_pb2 import LinkAttributes, LinkId
+from common.proto.forecaster_pb2 import (
+ ForecastLinkCapacityReply, ForecastLinkCapacityRequest,
+ ForecastTopologyCapacityReply, ForecastTopologyCapacityRequest
+)
+from common.proto.forecaster_pb2_grpc import ForecasterServiceServicer
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.tools.context_queries.Link import get_link
+from common.tools.context_queries.Topology import get_topology_details
+from common.tools.timestamp.Converters import timestamp_utcnow_to_float
+from context.client.ContextClient import ContextClient
+from forecaster.Config import FORECAST_TO_HISTORY_RATIO
+from forecaster.service.Forecaster import compute_forecast
+from forecaster.service.KpiManager import KpiManager
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Forecaster', 'RPC')
+
+class ForecasterServiceServicerImpl(ForecasterServiceServicer):
+ def __init__(self) -> None:
+ LOGGER.debug('Creating Servicer...')
+ self._kpi_manager = KpiManager()
+ LOGGER.debug('Servicer Created')
+
+ @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+ def ForecastLinkCapacity(
+ self, request : ForecastLinkCapacityRequest, context : grpc.ServicerContext
+ ) -> ForecastLinkCapacityReply:
+ forecast_window_seconds = request.forecast_window_seconds
+
+        # history_window_seconds defines the size of the historical (training) window, derived from the
+        # requested forecast (test) window and the configured forecast-to-history ratio
+ history_window_seconds = FORECAST_TO_HISTORY_RATIO * forecast_window_seconds
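+        # e.g., with the default ratio of 10, a 3600-second forecast window fetches 36000 seconds of history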
+
+ link_id = request.link_id
+ link_uuid = link_id.link_uuid.uuid
+
+ context_client = ContextClient()
+ link = get_link(context_client, link_uuid)
+ if link is None: raise NotFoundException('Link', link_uuid)
+
+ kpi_id_map = self._kpi_manager.get_kpi_ids_from_link_ids([link_id])
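+        # Keep only the KPI that measures used link capacity (KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS)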
+ link_uuid__to__kpi_id = {
+ _link_uuid : _kpi_id
+ for (_link_uuid, _kpi_sample_type), _kpi_id in kpi_id_map.items()
+ if _kpi_sample_type == KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS
+ }
+ kpi_id = link_uuid__to__kpi_id[link_uuid]
+
+ end_timestamp = timestamp_utcnow_to_float()
+ start_timestamp = end_timestamp - history_window_seconds
+ df_historical_data = self._kpi_manager.get_kpi_id_samples([kpi_id], start_timestamp, end_timestamp)
+ forecast_used_capacity_gbps = compute_forecast(df_historical_data, kpi_id)
+
+ reply = ForecastLinkCapacityReply()
+ reply.link_id.link_uuid.uuid = link_uuid
+ reply.total_capacity_gbps = link.attributes.total_capacity_gbps
+ reply.current_used_capacity_gbps = link.attributes.used_capacity_gbps
+ reply.forecast_used_capacity_gbps = forecast_used_capacity_gbps
+ return reply
+
+ @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+ def ForecastTopologyCapacity(
+ self, request : ForecastTopologyCapacityRequest, context : grpc.ServicerContext
+ ) -> ForecastTopologyCapacityReply:
+ forecast_window_seconds = request.forecast_window_seconds
+
+        # history_window_seconds defines the size of the historical (training) window, derived from the
+        # requested forecast (test) window and the configured forecast-to-history ratio
+ history_window_seconds = FORECAST_TO_HISTORY_RATIO * forecast_window_seconds
+
+ context_uuid = request.topology_id.context_id.context_uuid.uuid
+ topology_uuid = request.topology_id.topology_uuid.uuid
+ context_client = ContextClient()
+ topology_details = get_topology_details(context_client, topology_uuid, context_uuid=context_uuid)
+ if topology_details is None:
+ topology_uuid = '{:s}/{:s}'.format(context_uuid, topology_uuid)
+ raise NotFoundException('Topology', topology_uuid)
+
+ link_ids : List[LinkId] = list()
+ link_capacities : Dict[str, LinkAttributes] = dict()
+ for link in topology_details.links:
+ link_ids.append(link.link_id)
+ link_capacities[link.link_id.link_uuid.uuid] = link.attributes
+
+ kpi_id_map = self._kpi_manager.get_kpi_ids_from_link_ids(link_ids)
+ link_uuid__to__kpi_id = {
+            _link_uuid : _kpi_id
+            for (_link_uuid, _kpi_sample_type), _kpi_id in kpi_id_map.items()
+ if _kpi_sample_type == KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS
+ }
+
+ kpi_ids = list(link_uuid__to__kpi_id.values())
+ end_timestamp = timestamp_utcnow_to_float()
+ start_timestamp = end_timestamp - history_window_seconds
+ df_historical_data = self._kpi_manager.get_kpi_id_samples(kpi_ids, start_timestamp, end_timestamp)
+
+ reply = ForecastTopologyCapacityReply()
+ for link_uuid, kpi_id in link_uuid__to__kpi_id.items():
+ link_attributes = link_capacities[link_uuid]
+ forecast_used_capacity_gbps = compute_forecast(df_historical_data, kpi_id)
+ link_capacity : ForecastLinkCapacityReply = reply.link_capacities.add() # pylint: disable=no-member
+ link_capacity.link_id.link_uuid.uuid = link_uuid
+ link_capacity.total_capacity_gbps = link_attributes.total_capacity_gbps
+ link_capacity.current_used_capacity_gbps = link_attributes.used_capacity_gbps
+ link_capacity.forecast_used_capacity_gbps = forecast_used_capacity_gbps
+ return reply
diff --git a/src/forecaster/service/KpiManager.py b/src/forecaster/service/KpiManager.py
new file mode 100644
index 0000000000000000000000000000000000000000..15864c5936b00b792e83c78934c8cc84286662eb
--- /dev/null
+++ b/src/forecaster/service/KpiManager.py
@@ -0,0 +1,57 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pandas
+from typing import Dict, List, Tuple
+from common.proto.context_pb2 import Empty, LinkId
+from common.proto.monitoring_pb2 import KpiId, KpiQuery
+from monitoring.client.MonitoringClient import MonitoringClient
+
+class KpiManager:
+ def __init__(self) -> None:
+ self._monitoring_client = MonitoringClient()
+
+ def get_kpi_ids_from_link_ids(
+ self, link_ids : List[LinkId]
+ ) -> Dict[Tuple[str, int], KpiId]:
+ link_uuids = {link_id.link_uuid.uuid for link_id in link_ids}
+ kpi_descriptors = self._monitoring_client.GetKpiDescriptorList(Empty())
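+        # Index KPI IDs by (link UUID, KPI sample type) so callers can select the KPI for a specific metric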
+ kpi_ids : Dict[Tuple[str, int], KpiId] = {
+ (kpi_descriptor.link_id.link_uuid.uuid, kpi_descriptor.kpi_sample_type) : kpi_descriptor.kpi_id
+ for kpi_descriptor in kpi_descriptors.kpi_descriptor_list
+ if kpi_descriptor.link_id.link_uuid.uuid in link_uuids
+ }
+ return kpi_ids
+
+ def get_kpi_id_samples(
+ self, kpi_ids : List[KpiId], start_timestamp : float, end_timestamp : float
+ ) -> pandas.DataFrame:
+ kpi_query = KpiQuery()
+ for kpi_id in kpi_ids: kpi_query.kpi_ids.add().kpi_id.uuid = kpi_id.kpi_id.uuid
+ kpi_query.start_timestamp.timestamp = start_timestamp # pylint: disable=no-member
+ kpi_query.end_timestamp.timestamp = end_timestamp # pylint: disable=no-member
+ raw_kpi_table = self._monitoring_client.QueryKpiData(kpi_query)
+
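+        # Flatten the RawKpiTable reply into (timestamp, kpi_id, value) tuples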
+ data : List[Tuple[str, float, float]] = list()
+ for raw_kpi_list in raw_kpi_table.raw_kpi_lists:
+ kpi_uuid = raw_kpi_list.kpi_id.kpi_id.uuid
+ for raw_kpi in raw_kpi_list.raw_kpis:
+ timestamp = raw_kpi.timestamp.timestamp
+ value = float(getattr(raw_kpi.kpi_value, raw_kpi.kpi_value.WhichOneof('value')))
+ data.append((timestamp, kpi_uuid, value))
+
+ df = pandas.DataFrame(data, columns=['timestamp', 'kpi_id', 'value'])
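+        # Timestamps arrive as epoch seconds; convert them to timezone-aware datetimes and sort chronologically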
+ df['timestamp'] = pandas.to_datetime(df['timestamp'].astype('int'), unit='s', utc=True)
+ df.sort_values('timestamp', ascending=True, inplace=True)
+ return df
diff --git a/src/forecaster/service/__init__.py b/src/forecaster/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/forecaster/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/forecaster/service/__main__.py b/src/forecaster/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..780fe5f8f3386b571c37ce64a0b95578e9641110
--- /dev/null
+++ b/src/forecaster/service/__main__.py
@@ -0,0 +1,70 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+ ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+ wait_for_environment_variables)
+from .ForecasterService import ForecasterService
+
+terminate = threading.Event()
+LOGGER : logging.Logger = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+ LOGGER.warning('Terminate signal received')
+ terminate.set()
+
+def main():
+ global LOGGER # pylint: disable=global-statement
+
+ log_level = get_log_level()
+ logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+ logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
+ logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
+ logging.getLogger('monitoring-client').setLevel(logging.WARNING)
+ LOGGER = logging.getLogger(__name__)
+
+ wait_for_environment_variables([
+ get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ),
+ get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+ get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_HOST ),
+ get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+ ])
+
+ signal.signal(signal.SIGINT, signal_handler)
+ signal.signal(signal.SIGTERM, signal_handler)
+
+ LOGGER.info('Starting...')
+
+ # Start metrics server
+ metrics_port = get_metrics_port()
+ start_http_server(metrics_port)
+
+ # Starting Forecaster service
+ grpc_service = ForecasterService()
+ grpc_service.start()
+
+ # Wait for Ctrl+C or termination signal
+ while not terminate.wait(timeout=1.0): pass
+
+ LOGGER.info('Terminating...')
+ grpc_service.stop()
+
+ LOGGER.info('Bye')
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/forecaster/tests/MockService_Dependencies.py b/src/forecaster/tests/MockService_Dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..858db17a9e35e30ea93c965815b39a068c696b4b
--- /dev/null
+++ b/src/forecaster/tests/MockService_Dependencies.py
@@ -0,0 +1,49 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Union
+from common.Constants import ServiceNameEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name
+from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from common.proto.monitoring_pb2_grpc import add_MonitoringServiceServicer_to_server
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.tests.MockServicerImpl_Monitoring import MockServicerImpl_Monitoring
+from common.tools.service.GenericGrpcService import GenericGrpcService
+
+LOCAL_HOST = '127.0.0.1'
+
+SERVICE_CONTEXT = ServiceNameEnum.CONTEXT
+SERVICE_MONITORING = ServiceNameEnum.MONITORING
+
+class MockService_Dependencies(GenericGrpcService):
+ # Mock Service implementing Context and Monitoring to simplify unitary tests of the Forecaster component
+
+ def __init__(self, bind_port: Union[str, int]) -> None:
+ super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+ # pylint: disable=attribute-defined-outside-init
+ def install_servicers(self):
+ self.context_servicer = MockServicerImpl_Context()
+ add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+
+ self.monitoring_servicer = MockServicerImpl_Monitoring()
+ add_MonitoringServiceServicer_to_server(self.monitoring_servicer, self.server)
+
+ def configure_env_vars(self):
+ os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address)
+ os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
+
+ os.environ[get_env_var_name(SERVICE_MONITORING, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address)
+ os.environ[get_env_var_name(SERVICE_MONITORING, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
diff --git a/src/forecaster/tests/PrepareTestScenario.py b/src/forecaster/tests/PrepareTestScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d383f616cce90532228efad515ef5a12509403e
--- /dev/null
+++ b/src/forecaster/tests/PrepareTestScenario.py
@@ -0,0 +1,66 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest, os
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+ ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from context.client.ContextClient import ContextClient
+from forecaster.client.ForecasterClient import ForecasterClient
+from forecaster.service.ForecasterService import ForecasterService
+from monitoring.client.MonitoringClient import MonitoringClient
+from .MockService_Dependencies import MockService_Dependencies
+
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+# avoid privileged ports
+FORECASTER_SERVICE_PORT = MOCKSERVICE_PORT + int(get_service_port_grpc(ServiceNameEnum.FORECASTER))
+os.environ[get_env_var_name(ServiceNameEnum.FORECASTER, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.FORECASTER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(FORECASTER_SERVICE_PORT)
+
+@pytest.fixture(scope='session')
+def mock_service():
+ _service = MockService_Dependencies(MOCKSERVICE_PORT)
+ _service.configure_env_vars()
+ _service.start()
+ yield _service
+ _service.stop()
+
+@pytest.fixture(scope='session')
+def context_client(mock_service : MockService_Dependencies): # pylint: disable=redefined-outer-name
+ _client = ContextClient()
+ yield _client
+ _client.close()
+
+@pytest.fixture(scope='session')
+def monitoring_client(mock_service : MockService_Dependencies): # pylint: disable=redefined-outer-name
+ _client = MonitoringClient()
+ yield _client
+ _client.close()
+
+@pytest.fixture(scope='session')
+def forecaster_service(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name
+):
+ _service = ForecasterService()
+ _service.start()
+ yield _service
+ _service.stop()
+
+@pytest.fixture(scope='session')
+def forecaster_client(forecaster_service : ForecasterService): # pylint: disable=redefined-outer-name
+ _client = ForecasterClient()
+ yield _client
+ _client.close()
diff --git a/src/forecaster/tests/Tools.py b/src/forecaster/tests/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fd05f100f1b488aea4e4e1db9502675ac6e9a9f
--- /dev/null
+++ b/src/forecaster/tests/Tools.py
@@ -0,0 +1,125 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, math, pandas
+from typing import Dict
+from common.tools.object_factory.Context import json_context
+from common.tools.object_factory.Device import (
+ json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id
+)
+from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
+from common.tools.object_factory.Link import json_link
+from common.tools.object_factory.Topology import json_topology
+from common.tools.timestamp.Converters import timestamp_datetime_to_int, timestamp_utcnow_to_float
+
+LOGGER = logging.getLogger(__name__)
+
+def read_csv(csv_file : str) -> pandas.DataFrame:
+ LOGGER.info('Using Data File "{:s}"...'.format(csv_file))
+
+ LOGGER.info('Loading...')
+ df = pandas.read_csv(csv_file)
+ LOGGER.info(' DONE')
+
+ LOGGER.info('Parsing and Adapting columns...')
+ if 'dataset.csv' in csv_file:
+ df.rename(columns={'linkid': 'link_id', 'ds': 'timestamp', 'y': 'used_capacity_gbps'}, inplace=True)
+ df[['source', 'destination']] = df['link_id'].str.split('_', expand=True)
+ #elif 'dataset2.csv' in csv_file:
+ # df.drop(columns=['Unnamed: 0'], inplace=True)
+ # df.rename(columns={
+ # 'target': 'destination', 'id': 'link_id', 'ds': 'timestamp', 'demandValue': 'used_capacity_gbps'
+ # }, inplace=True)
+ LOGGER.info(' DONE')
+
+ LOGGER.info('Updating timestamps...')
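+ # Shift every timestamp so the most recent sample in the dataset aligns with the current time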
+ df['timestamp'] = pandas.to_datetime(df['timestamp'])
+ max_timestamp = timestamp_datetime_to_int(df['timestamp'].max())
+ now_timestamp = timestamp_utcnow_to_float()
+ df['timestamp'] = df['timestamp'] + pandas.offsets.Second(now_timestamp - max_timestamp)
+ LOGGER.info(' DONE')
+
+ LOGGER.info('Sorting...')
+ df.sort_values('timestamp', ascending=True, inplace=True)
+ LOGGER.info(' DONE')
+
+ return df
+
+def compose_descriptors(df : pandas.DataFrame, num_client_endpoints : int = 0) -> Dict:
+ devices = dict()
+ links = dict()
+
+ LOGGER.info('Discovering Devices and Links...')
+ df_links = df[['link_id', 'source', 'destination']].drop_duplicates()
+ for row in df_links.itertuples(index=False):
+ link_uuid = row.link_id
+ src_device_uuid = row.source
+ dst_device_uuid = row.destination
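+ # By convention, the endpoint on each device is named after the device at the other end of the link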
+ src_port_uuid = row.destination
+ dst_port_uuid = row.source
+
+ if src_device_uuid not in devices:
+ endpoints = set()
+ for num_client_endpoint in range(num_client_endpoints):
+ endpoints.add('client:{:d}'.format(num_client_endpoint))
+ devices[src_device_uuid] = {'id': src_device_uuid, 'endpoints': endpoints}
+ devices[src_device_uuid]['endpoints'].add(src_port_uuid)
+
+ if dst_device_uuid not in devices:
+ endpoints = set()
+ for num_client_endpoint in range(num_client_endpoints):
+ endpoints.add('client:{:d}'.format(num_client_endpoint))
+ devices[dst_device_uuid] = {'id': dst_device_uuid, 'endpoints': endpoints}
+ devices[dst_device_uuid]['endpoints'].add(dst_port_uuid)
+
+ if link_uuid not in links:
+ total_capacity_gbps = df[df.link_id==link_uuid]['used_capacity_gbps'].max()
+ total_capacity_gbps = math.ceil(total_capacity_gbps / 100) * 100 # round up in steps of 100
+ used_capacity_gbps = df[df.link_id==link_uuid].used_capacity_gbps.iat[-1] # get last value
+ links[link_uuid] = {
+ 'id': link_uuid,
+ 'src_dev': src_device_uuid, 'src_port': src_port_uuid,
+ 'dst_dev': dst_device_uuid, 'dst_port': dst_port_uuid,
+ 'total_capacity_gbps': total_capacity_gbps, 'used_capacity_gbps': used_capacity_gbps,
+ }
+ LOGGER.info(' Found {:d} devices and {:d} links...'.format(len(devices), len(links)))
+
+ LOGGER.info('Composing Descriptors...')
+ _context = json_context('admin', name='admin')
+ _topology = json_topology('admin', name='admin', context_id=_context['context_id'])
+ descriptor = {
+ 'dummy_mode': True, # inject the descriptors directly into the Context component
+ 'contexts': [_context],
+ 'topologies': [_topology],
+ 'devices': [
+ json_device_emulated_packet_router_disabled(
+ device_uuid, name=device_uuid, endpoints=[
+ json_endpoint(json_device_id(device_uuid), endpoint_uuid, 'copper')
+ for endpoint_uuid in device_data['endpoints']
+ ], config_rules=json_device_emulated_connect_rules([]))
+ for device_uuid,device_data in devices.items()
+ ],
+ 'links': [
+ json_link(link_uuid, [
+ json_endpoint_id(json_device_id(link_data['src_dev']), link_data['src_port']),
+ json_endpoint_id(json_device_id(link_data['dst_dev']), link_data['dst_port']),
+ ], name=link_uuid, total_capacity_gbps=link_data['total_capacity_gbps'],
+ used_capacity_gbps=link_data['used_capacity_gbps'])
+ for link_uuid,link_data in links.items()
+ ],
+ }
+ LOGGER.info(' DONE')
+ return descriptor
diff --git a/src/forecaster/tests/__init__.py b/src/forecaster/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/forecaster/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/forecaster/tests/data/README.md b/src/forecaster/tests/data/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..eaf303847b745e0663e76f300d602cbc048ff599
--- /dev/null
+++ b/src/forecaster/tests/data/README.md
@@ -0,0 +1,33 @@
+# Manual Forecaster Test
+
+- Move to the root folder:
+```bash
+cd ~/tfs-ctrl
+```
+
+- Edit `my_deploy.sh` and enable the `monitoring` and `forecaster` components:
+```bash
+export TFS_COMPONENTS="context device monitoring forecaster pathcomp service slice compute webui load_generator"
+```
+
+- Edit `deploy/tfs.sh` and disable linkerd injection so that unencrypted traffic can be captured for debugging:
+```bash
+cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+#cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
+```
+
+- Deploy TeraFlowSDN controller:
+```bash
+source my_deploy.sh
+./deploy/all.sh
+```
+
+- Onboard the topology descriptor `src/forecaster/tests/data/topology.json` through the WebUI.
+
+- Source the runtime environment variables and inject the link utilization KPI values into the Monitoring database:
+```bash
+source tfs_runtime_env_vars.sh
+python src/forecaster/tests/data/inject_samples.py
+```
+
+- Onboard the service descriptor `src/forecaster/tests/data/service.json` through the WebUI.
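+
+- Optionally, check that the Forecaster component receives requests during path computation by inspecting its logs. This is an illustrative check; it assumes the `TFS_K8S_NAMESPACE` variable exported by the deployment scripts and the `forecasterservice` Deployment with its `server` container:
+```bash
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/forecasterservice -c server
+```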
diff --git a/src/forecaster/tests/data/dataset.csv b/src/forecaster/tests/data/dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..476bfccb53f2903a22f709b30c83d404b52c41d1
--- /dev/null
+++ b/src/forecaster/tests/data/dataset.csv
@@ -0,0 +1,328 @@
+"linkid","ds","y"
+"be1.be_de1.de","2005-07-29 11:30:00",13.633721
+"be1.be_fr1.fr","2005-07-29 11:30:00",37.369276
+"be1.be_gr1.gr","2005-07-29 11:30:00",0.023673
+"be1.be_it1.it","2005-07-29 11:30:00",0.251916
+"be1.be_uk1.uk","2005-07-29 11:30:00",26.126846
+"de1.de_be1.be","2005-07-29 11:30:00",41.771397
+"de1.de_gr1.gr","2005-07-29 11:30:00",137.840494
+"de1.de_uk1.uk","2005-07-29 11:30:00",187.909209
+"es1.es_fr1.fr","2005-07-29 11:30:00",9.989878
+"es1.es_it1.it","2005-07-29 11:30:00",15.279216
+"es1.es_pt1.pt","2005-07-29 11:30:00",18.024652
+"fr1.fr_be1.be","2005-07-29 11:30:00",3.491062
+"fr1.fr_es1.es","2005-07-29 11:30:00",27.289934
+"fr1.fr_it1.it","2005-07-29 11:30:00",5.19262
+"fr1.fr_pt1.pt","2005-07-29 11:30:00",2.075046
+"fr1.fr_uk1.uk","2005-07-29 11:30:00",171.856756
+"gr1.gr_be1.be","2005-07-29 11:30:00",0.463852
+"gr1.gr_de1.de","2005-07-29 11:30:00",4070.266255
+"gr1.gr_it1.it","2005-07-29 11:30:00",6.49398
+"it1.it_be1.be","2005-07-29 11:30:00",26.771257
+"it1.it_es1.es","2005-07-29 11:30:00",209.799198
+"it1.it_fr1.fr","2005-07-29 11:30:00",7.919724
+"it1.it_gr1.gr","2005-07-29 11:30:00",4.624628
+"pt1.pt_es1.es","2005-07-29 11:30:00",89.292105
+"pt1.pt_fr1.fr","2005-07-29 11:30:00",0.978255
+"pt1.pt_uk1.uk","2005-07-29 11:30:00",62.342932
+"uk1.uk_be1.be","2005-07-29 11:30:00",11.812727
+"uk1.uk_de1.de","2005-07-29 11:30:00",255.863122
+"uk1.uk_fr1.fr","2005-07-29 11:30:00",233.412507
+"uk1.uk_pt1.pt","2005-07-29 11:30:00",502.236205
+"be1.be_de1.de","2005-06-20 18:15:00",46.947486
+"be1.be_fr1.fr","2005-06-20 18:15:00",73.202652
+"be1.be_gr1.gr","2005-06-20 18:15:00",0.084347
+"be1.be_it1.it","2005-06-20 18:15:00",0.386309
+"be1.be_uk1.uk","2005-06-20 18:15:00",181.377554
+"de1.de_be1.be","2005-06-20 18:15:00",12.337703
+"de1.de_gr1.gr","2005-06-20 18:15:00",166.013822
+"de1.de_uk1.uk","2005-06-20 18:15:00",196.713395
+"es1.es_fr1.fr","2005-06-20 18:15:00",10.50535
+"es1.es_it1.it","2005-06-20 18:15:00",8.637348
+"es1.es_pt1.pt","2005-06-20 18:15:00",20.493486
+"fr1.fr_be1.be","2005-06-20 18:15:00",15.280566
+"fr1.fr_es1.es","2005-06-20 18:15:00",41.105169
+"fr1.fr_it1.it","2005-06-20 18:15:00",6.188476
+"fr1.fr_pt1.pt","2005-06-20 18:15:00",7.292464
+"fr1.fr_uk1.uk","2005-06-20 18:15:00",39.916047
+"gr1.gr_be1.be","2005-06-20 18:15:00",0.987871
+"gr1.gr_de1.de","2005-06-20 18:15:00",3761.528535
+"gr1.gr_it1.it","2005-06-20 18:15:00",0.762946
+"it1.it_be1.be","2005-06-20 18:15:00",11.693967
+"it1.it_es1.es","2005-06-20 18:15:00",20.093962
+"it1.it_fr1.fr","2005-06-20 18:15:00",25.279273
+"it1.it_gr1.gr","2005-06-20 18:15:00",54.110787
+"pt1.pt_es1.es","2005-06-20 18:15:00",12.870606
+"pt1.pt_fr1.fr","2005-06-20 18:15:00",1.294275
+"pt1.pt_uk1.uk","2005-06-20 18:15:00",335.570213
+"uk1.uk_be1.be","2005-06-20 18:15:00",7.096555
+"uk1.uk_de1.de","2005-06-20 18:15:00",356.532161
+"uk1.uk_fr1.fr","2005-06-20 18:15:00",20.45373
+"uk1.uk_pt1.pt","2005-06-20 18:15:00",791.04219
+"be1.be_de1.de","2005-08-29 16:45:00",20.400142
+"be1.be_fr1.fr","2005-08-29 16:45:00",31.346514
+"be1.be_gr1.gr","2005-08-29 16:45:00",0.026822
+"be1.be_it1.it","2005-08-29 16:45:00",0.357069
+"be1.be_uk1.uk","2005-08-29 16:45:00",8.252107
+"de1.de_be1.be","2005-08-29 16:45:00",57.709307
+"de1.de_gr1.gr","2005-08-29 16:45:00",110.602237
+"de1.de_uk1.uk","2005-08-29 16:45:00",239.446965
+"es1.es_fr1.fr","2005-08-29 16:45:00",20.517778
+"es1.es_it1.it","2005-08-29 16:45:00",15.353667
+"es1.es_pt1.pt","2005-08-29 16:45:00",5.643483
+"fr1.fr_be1.be","2005-08-29 16:45:00",4.804849
+"fr1.fr_es1.es","2005-08-29 16:45:00",96.682749
+"fr1.fr_it1.it","2005-08-29 16:45:00",3.330747
+"fr1.fr_pt1.pt","2005-08-29 16:45:00",1.916587
+"fr1.fr_uk1.uk","2005-08-29 16:45:00",28.144199
+"gr1.gr_be1.be","2005-08-29 16:45:00",0.895539
+"gr1.gr_de1.de","2005-08-29 16:45:00",4930.897339
+"gr1.gr_it1.it","2005-08-29 16:45:00",7.983659
+"it1.it_be1.be","2005-08-29 16:45:00",10.327772
+"it1.it_es1.es","2005-08-29 16:45:00",18.97108
+"it1.it_fr1.fr","2005-08-29 16:45:00",98.574706
+"it1.it_gr1.gr","2005-08-29 16:45:00",1.593313
+"pt1.pt_es1.es","2005-08-29 16:45:00",13.32428
+"pt1.pt_fr1.fr","2005-08-29 16:45:00",3.733925
+"pt1.pt_uk1.uk","2005-08-29 16:45:00",51.415678
+"uk1.uk_be1.be","2005-08-29 16:45:00",14.334868
+"uk1.uk_de1.de","2005-08-29 16:45:00",199.272255
+"uk1.uk_fr1.fr","2005-08-29 16:45:00",55.182499
+"uk1.uk_pt1.pt","2005-08-29 16:45:00",652.70225
+"be1.be_de1.de","2005-08-16 07:30:00",27.712568
+"be1.be_fr1.fr","2005-08-16 07:30:00",3.889573
+"be1.be_gr1.gr","2005-08-16 07:30:00",0.086821
+"be1.be_it1.it","2005-08-16 07:30:00",0.005398
+"be1.be_uk1.uk","2005-08-16 07:30:00",4.136991
+"de1.de_be1.be","2005-08-16 07:30:00",41.662646
+"de1.de_gr1.gr","2005-08-16 07:30:00",83.698837
+"de1.de_uk1.uk","2005-08-16 07:30:00",75.24126
+"es1.es_fr1.fr","2005-08-16 07:30:00",2.219729
+"es1.es_it1.it","2005-08-16 07:30:00",7.145139
+"es1.es_pt1.pt","2005-08-16 07:30:00",1.677266
+"fr1.fr_be1.be","2005-08-16 07:30:00",7.666459
+"fr1.fr_es1.es","2005-08-16 07:30:00",13.062805
+"fr1.fr_it1.it","2005-08-16 07:30:00",0.266986
+"fr1.fr_pt1.pt","2005-08-16 07:30:00",0.883506
+"fr1.fr_uk1.uk","2005-08-16 07:30:00",11.742382
+"gr1.gr_be1.be","2005-08-16 07:30:00",0.61562
+"gr1.gr_de1.de","2005-08-16 07:30:00",2001.396054
+"gr1.gr_it1.it","2005-08-16 07:30:00",2.294517
+"it1.it_be1.be","2005-08-16 07:30:00",3.148501
+"it1.it_es1.es","2005-08-16 07:30:00",5.216237
+"it1.it_fr1.fr","2005-08-16 07:30:00",3.382423
+"it1.it_gr1.gr","2005-08-16 07:30:00",4.762001
+"pt1.pt_es1.es","2005-08-16 07:30:00",0.328222
+"pt1.pt_fr1.fr","2005-08-16 07:30:00",0.544094
+"pt1.pt_uk1.uk","2005-08-16 07:30:00",6.946024
+"uk1.uk_be1.be","2005-08-16 07:30:00",14.723145
+"uk1.uk_de1.de","2005-08-16 07:30:00",101.817113
+"uk1.uk_fr1.fr","2005-08-16 07:30:00",13.165427
+"uk1.uk_pt1.pt","2005-08-16 07:30:00",59.307456
+"be1.be_de1.de","2005-05-07 23:45:00",3.00826
+"be1.be_fr1.fr","2005-05-07 23:45:00",18.221967
+"be1.be_gr1.gr","2005-05-07 23:45:00",0.173417
+"be1.be_it1.it","2005-05-07 23:45:00",0.00523
+"be1.be_uk1.uk","2005-05-07 23:45:00",1.61097
+"de1.de_be1.be","2005-05-07 23:45:00",56.899745
+"de1.de_gr1.gr","2005-05-07 23:45:00",631.542367
+"de1.de_uk1.uk","2005-05-07 23:45:00",210.597064
+"es1.es_fr1.fr","2005-05-07 23:45:00",4.532915
+"es1.es_it1.it","2005-05-07 23:45:00",3.393107
+"es1.es_pt1.pt","2005-05-07 23:45:00",12.377514
+"fr1.fr_be1.be","2005-05-07 23:45:00",0.414987
+"fr1.fr_es1.es","2005-05-07 23:45:00",6.300601
+"fr1.fr_it1.it","2005-05-07 23:45:00",3.127358
+"fr1.fr_pt1.pt","2005-05-07 23:45:00",0.651608
+"fr1.fr_uk1.uk","2005-05-07 23:45:00",33.277552
+"gr1.gr_be1.be","2005-05-07 23:45:00",0.199733
+"gr1.gr_de1.de","2005-05-07 23:45:00",933.371627
+"gr1.gr_it1.it","2005-05-07 23:45:00",1.692505
+"it1.it_be1.be","2005-05-07 23:45:00",27.93755
+"it1.it_es1.es","2005-05-07 23:45:00",46.131064
+"it1.it_fr1.fr","2005-05-07 23:45:00",16.271473
+"it1.it_gr1.gr","2005-05-07 23:45:00",739.540984
+"pt1.pt_es1.es","2005-05-07 23:45:00",0.777791
+"pt1.pt_fr1.fr","2005-05-07 23:45:00",0.319394
+"pt1.pt_uk1.uk","2005-05-07 23:45:00",176.696919
+"uk1.uk_be1.be","2005-05-07 23:45:00",1.521
+"uk1.uk_de1.de","2005-05-07 23:45:00",153.659587
+"uk1.uk_fr1.fr","2005-05-07 23:45:00",16.592104
+"uk1.uk_pt1.pt","2005-05-07 23:45:00",292.931055
+"be1.be_de1.de","2005-06-07 00:15:00",11.527804
+"be1.be_fr1.fr","2005-06-07 00:15:00",22.142524
+"be1.be_gr1.gr","2005-06-07 00:15:00",0.005173
+"be1.be_it1.it","2005-06-07 00:15:00",1.885266
+"be1.be_uk1.uk","2005-06-07 00:15:00",1.226966
+"de1.de_be1.be","2005-06-07 00:15:00",82.949555
+"de1.de_gr1.gr","2005-06-07 00:15:00",214.578575
+"de1.de_uk1.uk","2005-06-07 00:15:00",173.876359
+"es1.es_fr1.fr","2005-06-07 00:15:00",19.262246
+"es1.es_it1.it","2005-06-07 00:15:00",3.708902
+"es1.es_pt1.pt","2005-06-07 00:15:00",27.095824
+"fr1.fr_be1.be","2005-06-07 00:15:00",4.848429
+"fr1.fr_es1.es","2005-06-07 00:15:00",20.620681
+"fr1.fr_it1.it","2005-06-07 00:15:00",7.290271
+"fr1.fr_pt1.pt","2005-06-07 00:15:00",4.311982
+"fr1.fr_uk1.uk","2005-06-07 00:15:00",70.120616
+"gr1.gr_be1.be","2005-06-07 00:15:00",1.011151
+"gr1.gr_de1.de","2005-06-07 00:15:00",4235.315598
+"gr1.gr_it1.it","2005-06-07 00:15:00",2.605588
+"it1.it_be1.be","2005-06-07 00:15:00",29.244533
+"it1.it_es1.es","2005-06-07 00:15:00",18.239399
+"it1.it_fr1.fr","2005-06-07 00:15:00",38.556206
+"it1.it_gr1.gr","2005-06-07 00:15:00",25.842485
+"pt1.pt_es1.es","2005-06-07 00:15:00",3.852742
+"pt1.pt_fr1.fr","2005-06-07 00:15:00",0.065172
+"pt1.pt_uk1.uk","2005-06-07 00:15:00",367.749119
+"uk1.uk_be1.be","2005-06-07 00:15:00",12.040352
+"uk1.uk_de1.de","2005-06-07 00:15:00",162.255549
+"uk1.uk_fr1.fr","2005-06-07 00:15:00",58.607545
+"uk1.uk_pt1.pt","2005-06-07 00:15:00",433.376631
+"be1.be_de1.de","2005-06-22 20:30:00",20.497253
+"be1.be_fr1.fr","2005-06-22 20:30:00",20.782852
+"be1.be_gr1.gr","2005-06-22 20:30:00",0.005848
+"be1.be_it1.it","2005-06-22 20:30:00",0.090026
+"be1.be_uk1.uk","2005-06-22 20:30:00",2.056996
+"de1.de_be1.be","2005-06-22 20:30:00",27.225774
+"de1.de_gr1.gr","2005-06-22 20:30:00",272.761011
+"de1.de_uk1.uk","2005-06-22 20:30:00",199.828438
+"es1.es_fr1.fr","2005-06-22 20:30:00",22.522197
+"es1.es_it1.it","2005-06-22 20:30:00",13.393051
+"es1.es_pt1.pt","2005-06-22 20:30:00",5.932286
+"fr1.fr_be1.be","2005-06-22 20:30:00",15.616435
+"fr1.fr_es1.es","2005-06-22 20:30:00",29.365936
+"fr1.fr_it1.it","2005-06-22 20:30:00",2.797562
+"fr1.fr_pt1.pt","2005-06-22 20:30:00",10.604824
+"fr1.fr_uk1.uk","2005-06-22 20:30:00",45.283715
+"gr1.gr_be1.be","2005-06-22 20:30:00",1.087738
+"gr1.gr_de1.de","2005-06-22 20:30:00",4865.371323
+"gr1.gr_it1.it","2005-06-22 20:30:00",3.528006
+"it1.it_be1.be","2005-06-22 20:30:00",11.366813
+"it1.it_es1.es","2005-06-22 20:30:00",7.02441
+"it1.it_fr1.fr","2005-06-22 20:30:00",22.716083
+"it1.it_gr1.gr","2005-06-22 20:30:00",25.769665
+"pt1.pt_es1.es","2005-06-22 20:30:00",8.340278
+"pt1.pt_fr1.fr","2005-06-22 20:30:00",0.860282
+"pt1.pt_uk1.uk","2005-06-22 20:30:00",293.90633
+"uk1.uk_be1.be","2005-06-22 20:30:00",13.314889
+"uk1.uk_de1.de","2005-06-22 20:30:00",168.266278
+"uk1.uk_fr1.fr","2005-06-22 20:30:00",7.017269
+"uk1.uk_pt1.pt","2005-06-22 20:30:00",575.047825
+"be1.be_de1.de","2005-06-12 04:45:00",48.027576
+"be1.be_fr1.fr","2005-06-12 04:45:00",58.392573
+"be1.be_gr1.gr","2005-06-12 04:45:00",0.115611
+"be1.be_it1.it","2005-06-12 04:45:00",0.011134
+"be1.be_uk1.uk","2005-06-12 04:45:00",1.715897
+"de1.de_be1.be","2005-06-12 04:45:00",84.910002
+"de1.de_gr1.gr","2005-06-12 04:45:00",120.996306
+"de1.de_uk1.uk","2005-06-12 04:45:00",155.065369
+"es1.es_fr1.fr","2005-06-12 04:45:00",7.534709
+"es1.es_it1.it","2005-06-12 04:45:00",12.081569
+"es1.es_pt1.pt","2005-06-12 04:45:00",7.131193
+"fr1.fr_be1.be","2005-06-12 04:45:00",2.231369
+"fr1.fr_es1.es","2005-06-12 04:45:00",3.40216
+"fr1.fr_it1.it","2005-06-12 04:45:00",0.943786
+"fr1.fr_pt1.pt","2005-06-12 04:45:00",17.078504
+"fr1.fr_uk1.uk","2005-06-12 04:45:00",35.828258
+"gr1.gr_be1.be","2005-06-12 04:45:00",3.374157
+"gr1.gr_de1.de","2005-06-12 04:45:00",3976.311229
+"gr1.gr_it1.it","2005-06-12 04:45:00",0.046784
+"it1.it_be1.be","2005-06-12 04:45:00",12.296485
+"it1.it_es1.es","2005-06-12 04:45:00",18.296193
+"it1.it_fr1.fr","2005-06-12 04:45:00",4.634694
+"it1.it_gr1.gr","2005-06-12 04:45:00",0.255965
+"pt1.pt_es1.es","2005-06-12 04:45:00",1.012388
+"pt1.pt_fr1.fr","2005-06-12 04:45:00",0.612415
+"pt1.pt_uk1.uk","2005-06-12 04:45:00",254.932438
+"uk1.uk_be1.be","2005-06-12 04:45:00",0.815578
+"uk1.uk_de1.de","2005-06-12 04:45:00",83.263213
+"uk1.uk_fr1.fr","2005-06-12 04:45:00",16.271979
+"uk1.uk_pt1.pt","2005-06-12 04:45:00",324.332057
+"be1.be_de1.de","2005-07-30 02:00:00",40.205525
+"be1.be_fr1.fr","2005-07-30 02:00:00",17.725783
+"be1.be_gr1.gr","2005-07-30 02:00:00",0.013102
+"be1.be_uk1.uk","2005-07-30 02:00:00",8.324196
+"de1.de_be1.be","2005-07-30 02:00:00",29.881858
+"de1.de_gr1.gr","2005-07-30 02:00:00",62.722043
+"de1.de_uk1.uk","2005-07-30 02:00:00",216.185439
+"es1.es_fr1.fr","2005-07-30 02:00:00",22.856548
+"es1.es_it1.it","2005-07-30 02:00:00",11.847197
+"es1.es_pt1.pt","2005-07-30 02:00:00",15.523204
+"fr1.fr_be1.be","2005-07-30 02:00:00",7.983659
+"fr1.fr_es1.es","2005-07-30 02:00:00",3.740785
+"fr1.fr_it1.it","2005-07-30 02:00:00",0.502595
+"fr1.fr_pt1.pt","2005-07-30 02:00:00",0.199565
+"fr1.fr_uk1.uk","2005-07-30 02:00:00",12.856717
+"gr1.gr_be1.be","2005-07-30 02:00:00",1.1077
+"gr1.gr_de1.de","2005-07-30 02:00:00",3570.000337
+"gr1.gr_it1.it","2005-07-30 02:00:00",4.323903
+"it1.it_be1.be","2005-07-30 02:00:00",5.286414
+"it1.it_es1.es","2005-07-30 02:00:00",121.213021
+"it1.it_fr1.fr","2005-07-30 02:00:00",6.819559
+"it1.it_gr1.gr","2005-07-30 02:00:00",1.487767
+"pt1.pt_es1.es","2005-07-30 02:00:00",1.189291
+"pt1.pt_fr1.fr","2005-07-30 02:00:00",1.43311
+"pt1.pt_uk1.uk","2005-07-30 02:00:00",54.263399
+"uk1.uk_be1.be","2005-07-30 02:00:00",2.675709
+"uk1.uk_de1.de","2005-07-30 02:00:00",117.714986
+"uk1.uk_fr1.fr","2005-07-30 02:00:00",35.445042
+"uk1.uk_pt1.pt","2005-07-30 02:00:00",232.448872
+"be1.be_de1.de","2005-05-24 00:00:00",7.462676
+"be1.be_fr1.fr","2005-05-24 00:00:00",46.305493
+"be1.be_gr1.gr","2005-05-24 00:00:00",0.260969
+"be1.be_it1.it","2005-05-24 00:00:00",0.002643
+"be1.be_uk1.uk","2005-05-24 00:00:00",21.759308
+"de1.de_be1.be","2005-05-24 00:00:00",15.698308
+"de1.de_gr1.gr","2005-05-24 00:00:00",2032.807459
+"de1.de_uk1.uk","2005-05-24 00:00:00",550.498265
+"es1.es_fr1.fr","2005-05-24 00:00:00",20.892334
+"es1.es_it1.it","2005-05-24 00:00:00",99.741955
+"es1.es_pt1.pt","2005-05-24 00:00:00",16.16261
+"fr1.fr_be1.be","2005-05-24 00:00:00",2.836755
+"fr1.fr_es1.es","2005-05-24 00:00:00",10.259564
+"fr1.fr_it1.it","2005-05-24 00:00:00",2.967943
+"fr1.fr_pt1.pt","2005-05-24 00:00:00",2.573705
+"fr1.fr_uk1.uk","2005-05-24 00:00:00",40.368708
+"gr1.gr_be1.be","2005-05-24 00:00:00",2.010099
+"gr1.gr_de1.de","2005-05-24 00:00:00",4563.719698
+"gr1.gr_it1.it","2005-05-24 00:00:00",3.384785
+"it1.it_be1.be","2005-05-24 00:00:00",24.598593
+"it1.it_es1.es","2005-05-24 00:00:00",123.836434
+"it1.it_fr1.fr","2005-05-24 00:00:00",113.240327
+"it1.it_gr1.gr","2005-05-24 00:00:00",23.121173
+"pt1.pt_es1.es","2005-05-24 00:00:00",5.621496
+"pt1.pt_fr1.fr","2005-05-24 00:00:00",0.128713
+"pt1.pt_uk1.uk","2005-05-24 00:00:00",382.279278
+"uk1.uk_be1.be","2005-05-24 00:00:00",2.423005
+"uk1.uk_de1.de","2005-05-24 00:00:00",225.272469
+"uk1.uk_fr1.fr","2005-05-24 00:00:00",29.664524
+"uk1.uk_pt1.pt","2005-05-24 00:00:00",515.544459
+"be1.be_de1.de","2005-07-08 22:45:00",41.721914
+"be1.be_fr1.fr","2005-07-08 22:45:00",22.271518
+"be1.be_uk1.uk","2005-07-08 22:45:00",0.565237
+"de1.de_be1.be","2005-07-08 22:45:00",18.116253
+"de1.de_gr1.gr","2005-07-08 22:45:00",84.363659
+"de1.de_uk1.uk","2005-07-08 22:45:00",198.657985
+"es1.es_fr1.fr","2005-07-08 22:45:00",26.077588
+"es1.es_it1.it","2005-07-08 22:45:00",10.734268
+"es1.es_pt1.pt","2005-07-08 22:45:00",3.212886
+"fr1.fr_be1.be","2005-07-08 22:45:00",2.165579
+"fr1.fr_es1.es","2005-07-08 22:45:00",49.61386
+"fr1.fr_it1.it","2005-07-08 22:45:00",7.861918
+"fr1.fr_pt1.pt","2005-07-08 22:45:00",1.42833
+"fr1.fr_uk1.uk","2005-07-08 22:45:00",175.702188
+"gr1.gr_be1.be","2005-07-08 22:45:00",2.705961
+"gr1.gr_de1.de","2005-07-08 22:45:00",4139.070272
+"gr1.gr_it1.it","2005-07-08 22:45:00",2.765173
+"it1.it_be1.be","2005-07-08 22:45:00",22.960183
+"it1.it_es1.es","2005-07-08 22:45:00",191.877562
+"it1.it_fr1.fr","2005-07-08 22:45:00",10.385578
+"it1.it_gr1.gr","2005-07-08 22:45:00",0.905886
+"pt1.pt_es1.es","2005-07-08 22:45:00",6.78166
+"pt1.pt_fr1.fr","2005-07-08 22:45:00",0.162677
+"pt1.pt_uk1.uk","2005-07-08 22:45:00",131.320816
+"uk1.uk_be1.be","2005-07-08 22:45:00",0.836384
+"uk1.uk_de1.de","2005-07-08 22:45:00",547.487643
+"uk1.uk_fr1.fr","2005-07-08 22:45:00",102.387861
+"uk1.uk_pt1.pt","2005-07-08 22:45:00",381.91698
diff --git a/src/forecaster/tests/data/inject_samples.py b/src/forecaster/tests/data/inject_samples.py
new file mode 100644
index 0000000000000000000000000000000000000000..e77cd950828f27e5557ea575d7e9c9b55aabb315
--- /dev/null
+++ b/src/forecaster/tests/data/inject_samples.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, sys
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.monitoring_pb2 import Kpi, KpiDescriptor
+from common.tools.timestamp.Converters import timestamp_datetime_to_float
+from forecaster.tests.Tools import read_csv
+from monitoring.client.MonitoringClient import MonitoringClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+logging.getLogger('monitoring.client.MonitoringClient').setLevel(logging.INFO)
+
+CSV_DATA_FILE = 'src/forecaster/tests/data/dataset.csv'
+
+def main() -> int:
+ monitoring_client = MonitoringClient()
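+ # Cache link_uuid -> KPI uuid so that a single KpiDescriptor is created per link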
+ link_uuid_to_kpi_uuid = dict()
+
+ df = read_csv(CSV_DATA_FILE)
+ for row in df.itertuples(index=False):
+ link_uuid = row.link_id
+ timestamp = timestamp_datetime_to_float(row.timestamp)
+ used_capacity_gbps = row.used_capacity_gbps
+
+ if link_uuid in link_uuid_to_kpi_uuid:
+ kpi_uuid = link_uuid_to_kpi_uuid[link_uuid]
+ else:
+ kpi_descriptor = KpiDescriptor()
+ kpi_descriptor.kpi_description = 'Used Capacity in Link: {:s}'.format(link_uuid)
+ kpi_descriptor.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS
+ kpi_descriptor.link_id.link_uuid.uuid = link_uuid # pylint: disable=no-member
+ kpi_id = monitoring_client.SetKpi(kpi_descriptor)
+ kpi_uuid = kpi_id.kpi_id.uuid
+ link_uuid_to_kpi_uuid[link_uuid] = kpi_uuid
+
+ kpi = Kpi()
+ kpi.kpi_id.kpi_id.uuid = kpi_uuid # pylint: disable=no-member
+ kpi.timestamp.timestamp = timestamp # pylint: disable=no-member
+ kpi.kpi_value.floatVal = used_capacity_gbps # pylint: disable=no-member
+ monitoring_client.IncludeKpi(kpi)
+
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/forecaster/tests/data/service.json b/src/forecaster/tests/data/service.json
new file mode 100644
index 0000000000000000000000000000000000000000..f4482f44eab4ccb43907015cc351c5e44304cf4e
--- /dev/null
+++ b/src/forecaster/tests/data/service.json
@@ -0,0 +1,22 @@
+{
+ "services": [
+ {
+ "service_id": {
+ "context_id": {"context_uuid": {"uuid": "admin"}},
+ "service_uuid": {"uuid": "svc:pt1.pt/client:1==gr1.gr/client:3"}
+ },
+ "service_type": 1,
+ "service_status": {"service_status": 1},
+ "service_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "client:1"}},
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "client:3"}}
+ ],
+ "service_constraints": [
+ {"sla_capacity": {"capacity_gbps": 25.0}},
+ {"sla_latency": {"e2e_latency_ms": 20.0}},
+ {"schedule": {"start_timestamp": 1700053997, "duration_days": 1.5}}
+ ],
+ "service_config": {"config_rules": []}
+ }
+ ]
+}
diff --git a/src/forecaster/tests/data/topology.json b/src/forecaster/tests/data/topology.json
new file mode 100644
index 0000000000000000000000000000000000000000..f36fbd7d03a93db61a7233e084f60e7680a54606
--- /dev/null
+++ b/src/forecaster/tests/data/topology.json
@@ -0,0 +1,200 @@
+{
+ "contexts": [
+ {"context_id": {"context_uuid": {"uuid": "admin"}}, "name": "admin"}
+ ],
+ "topologies": [
+ {"topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}, "name": "admin"}
+ ],
+ "devices": [
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "be1.be", "type": "copper"}, {"uuid": "pt1.pt", "type": "copper"}, {"uuid": "uk1.uk", "type": "copper"},
+ {"uuid": "es1.es", "type": "copper"}, {"uuid": "it1.it", "type": "copper"}
+ ]}}}
+ ]}},
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "de1.de", "type": "copper"}, {"uuid": "gr1.gr", "type": "copper"}, {"uuid": "uk1.uk", "type": "copper"},
+ {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "it1.it", "type": "copper"}
+ ]}}}
+ ]}},
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "de1.de", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "be1.be", "type": "copper"},
+ {"uuid": "pt1.pt", "type": "copper"}
+ ]}}}
+ ]}},
+ {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "uk1.uk", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}, {"uuid": "gr1.gr", "type": "copper"}
+ ]}}}
+ ]}},
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "uk1.uk", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "es1.es", "type": "copper"}
+ ]}}}
+ ]}},
+ {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "it1.it", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "pt1.pt", "type": "copper"}
+ ]}}}
+ ]}},
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "es1.es", "type": "copper"}, {"uuid": "fr1.fr", "type": "copper"}, {"uuid": "be1.be", "type": "copper"},
+ {"uuid": "gr1.gr", "type": "copper"}
+ ]}}}
+ ]}},
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "device_type": "emu-packet-router", "device_operational_status": 1, "device_drivers": [0], "device_config": {"config_rules": [
+ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "client:1", "type": "copper"}, {"uuid": "client:2", "type": "copper"}, {"uuid": "client:3", "type": "copper"},
+ {"uuid": "it1.it", "type": "copper"}, {"uuid": "de1.de", "type": "copper"}, {"uuid": "be1.be", "type": "copper"}
+ ]}}}
+ ]}}
+ ],
+ "links": [
+ {"link_id": {"link_uuid": {"uuid": "fr1.fr_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 4.804849}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "be1.be"}},
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "uk1.uk_fr1.fr"}}, "attributes": {"total_capacity_gbps": 300, "used_capacity_gbps": 55.182499}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "fr1.fr"}},
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "uk1.uk_de1.de"}}, "attributes": {"total_capacity_gbps": 600, "used_capacity_gbps": 199.272255}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "de1.de"}},
+ {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "uk1.uk_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 14.334868}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "be1.be"}},
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "pt1.pt_uk1.uk"}}, "attributes": {"total_capacity_gbps": 400, "used_capacity_gbps": 51.415678}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "uk1.uk"}},
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "pt1.pt_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 3.733925}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "fr1.fr"}},
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "pt1.pt_es1.es"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 13.32428}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "es1.es"}},
+ {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "pt1.pt"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "it1.it_gr1.gr"}}, "attributes": {"total_capacity_gbps": 800, "used_capacity_gbps": 1.593313}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "gr1.gr"}},
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "it1.it"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "it1.it_fr1.fr"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 98.574706}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "fr1.fr"}},
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "it1.it"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "it1.it_es1.es"}}, "attributes": {"total_capacity_gbps": 300, "used_capacity_gbps": 18.97108}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "es1.es"}},
+ {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "it1.it"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "it1.it_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 10.327772}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "be1.be"}},
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "it1.it"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "gr1.gr_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 7.983659}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "it1.it"}},
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "gr1.gr_de1.de"}}, "attributes": {"total_capacity_gbps": 5000, "used_capacity_gbps": 4930.897339}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "de1.de"}},
+ {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "gr1.gr_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.895539}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "be1.be"}},
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "gr1.gr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "fr1.fr_uk1.uk"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 28.144199}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "uk1.uk"}},
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "fr1.fr_pt1.pt"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 1.916587}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "pt1.pt"}},
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "fr1.fr_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 3.330747}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "it1.it"}},
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "fr1.fr_es1.es"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 96.682749}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "es1.es"}},
+ {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "fr1.fr"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "es1.es_pt1.pt"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 5.643483}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "pt1.pt"}},
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "es1.es"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "es1.es_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 15.353667}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "it1.it"}},
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "es1.es"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "es1.es_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 20.517778}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "es1.es"}}, "endpoint_uuid": {"uuid": "fr1.fr"}},
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "es1.es"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "de1.de_uk1.uk"}}, "attributes": {"total_capacity_gbps": 600, "used_capacity_gbps": 239.446965}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "uk1.uk"}},
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "de1.de"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "de1.de_gr1.gr"}}, "attributes": {"total_capacity_gbps": 2100, "used_capacity_gbps": 110.602237}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "gr1.gr"}},
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "de1.de"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "de1.de_be1.be"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 57.709307}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "be1.be"}},
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "de1.de"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "uk1.uk_pt1.pt"}}, "attributes": {"total_capacity_gbps": 800, "used_capacity_gbps": 652.70225}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "pt1.pt"}},
+ {"device_id": {"device_uuid": {"uuid": "pt1.pt"}}, "endpoint_uuid": {"uuid": "uk1.uk"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "be1.be_uk1.uk"}}, "attributes": {"total_capacity_gbps": 200, "used_capacity_gbps": 8.252107}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "uk1.uk"}},
+ {"device_id": {"device_uuid": {"uuid": "uk1.uk"}}, "endpoint_uuid": {"uuid": "be1.be"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "be1.be_it1.it"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.357069}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "it1.it"}},
+ {"device_id": {"device_uuid": {"uuid": "it1.it"}}, "endpoint_uuid": {"uuid": "be1.be"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "be1.be_de1.de"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 20.400142}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "de1.de"}},
+ {"device_id": {"device_uuid": {"uuid": "de1.de"}}, "endpoint_uuid": {"uuid": "be1.be"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "be1.be_fr1.fr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 31.346514}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "fr1.fr"}},
+ {"device_id": {"device_uuid": {"uuid": "fr1.fr"}}, "endpoint_uuid": {"uuid": "be1.be"}}
+ ]},
+ {"link_id": {"link_uuid": {"uuid": "be1.be_gr1.gr"}}, "attributes": {"total_capacity_gbps": 100, "used_capacity_gbps": 0.026822}, "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "be1.be"}}, "endpoint_uuid": {"uuid": "gr1.gr"}},
+ {"device_id": {"device_uuid": {"uuid": "gr1.gr"}}, "endpoint_uuid": {"uuid": "be1.be"}}
+ ]}
+ ]
+}
diff --git a/src/forecaster/tests/test_unitary.py b/src/forecaster/tests/test_unitary.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d53e68df369113610f414d58664bf67e2e5d0b9
--- /dev/null
+++ b/src/forecaster/tests/test_unitary.py
@@ -0,0 +1,143 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pandas, pytest #, json
+from typing import Dict, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.proto.context_pb2 import ContextId, TopologyId
+from common.proto.forecaster_pb2 import ForecastLinkCapacityRequest, ForecastTopologyCapacityRequest
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.monitoring_pb2 import KpiDescriptor
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from forecaster.client.ForecasterClient import ForecasterClient
+from forecaster.tests.Tools import compose_descriptors, read_csv
+from monitoring.client.MonitoringClient import MonitoringClient
+from .MockService_Dependencies import MockService_Dependencies
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+ # be careful, order of symbols is important here!
+ mock_service, forecaster_service, context_client, monitoring_client, forecaster_client)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+logging.getLogger('common.tests.InMemoryObjectDatabase').setLevel(logging.INFO)
+logging.getLogger('common.tests.InMemoryTimeSeriesDatabase').setLevel(logging.INFO)
+logging.getLogger('common.tests.MockServicerImpl_Context').setLevel(logging.INFO)
+logging.getLogger('common.tests.MockServicerImpl_Monitoring').setLevel(logging.INFO)
+logging.getLogger('context.client.ContextClient').setLevel(logging.INFO)
+logging.getLogger('monitoring.client.MonitoringClient').setLevel(logging.INFO)
+
+JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID)
+ADMIN_TOPOLOGY_ID = TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=JSON_ADMIN_CONTEXT_ID))
+
+CSV_DATA_FILE = 'forecaster/tests/data/dataset.csv'
+#DESC_DATS_FILE = 'forecaster/tests/data/descriptor.json'
+
+@pytest.fixture(scope='session')
+def scenario() -> Tuple[pandas.DataFrame, Dict]:
+ df = read_csv(CSV_DATA_FILE)
+ descriptors = compose_descriptors(df)
+ #with open(DESC_DATS_FILE, 'w', encoding='UTF-8') as f:
+ # f.write(json.dumps(descriptors))
+ yield df, descriptors
+
+def test_prepare_environment(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name
+ mock_service : MockService_Dependencies, # pylint: disable=redefined-outer-name
+ scenario : Tuple[pandas.DataFrame, Dict] # pylint: disable=redefined-outer-name
+) -> None:
+ df, descriptors = scenario
+
+ validate_empty_scenario(context_client)
+ descriptor_loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
+ results = descriptor_loader.process()
+ check_descriptor_load_results(results, descriptor_loader)
+ descriptor_loader.validate()
+
+ # Verify the scenario has no services/slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 0
+ assert len(response.slice_ids) == 0
+
+ for link in descriptors['links']:
+ link_uuid = link['link_id']['link_uuid']['uuid']
+ kpi_descriptor = KpiDescriptor()
+ kpi_descriptor.kpi_id.kpi_id.uuid = link_uuid # pylint: disable=no-member
+ kpi_descriptor.kpi_description = 'Used Capacity in Link: {:s}'.format(link_uuid)
+ kpi_descriptor.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS
+ kpi_descriptor.link_id.link_uuid.uuid = link_uuid # pylint: disable=no-member
+ monitoring_client.SetKpi(kpi_descriptor)
+
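+ # Inject the CSV time series directly into the mock Monitoring servicer's in-memory DB; columns are renamed to the KPI fields the Forecaster expects.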
+ mock_service.monitoring_servicer.ts_db._data = df.rename(columns={
+ 'link_id': 'kpi_uuid',
+ 'used_capacity_gbps': 'value'
+ })
+
+def test_forecast_link(
+ context_client : ContextClient,
+ forecaster_client : ForecasterClient,
+): # pylint: disable=redefined-outer-name
+ topology = context_client.GetTopology(ADMIN_TOPOLOGY_ID)
+ link_id = topology.link_ids[0]
+ forecast_request = ForecastLinkCapacityRequest()
+ forecast_request.link_id.CopyFrom(link_id) # pylint: disable=no-member
+ forecast_request.forecast_window_seconds = 10 * 24 * 60 * 60 # 10 days in seconds
+ forecast_reply = forecaster_client.ForecastLinkCapacity(forecast_request)
+ assert forecast_reply.link_id == link_id
+ assert forecast_reply.total_capacity_gbps >= forecast_reply.current_used_capacity_gbps
+ # TODO: validate forecasted values; might be increasing or decreasing
+
+def test_forecast_topology(
+ context_client : ContextClient,
+ forecaster_client : ForecasterClient,
+): # pylint: disable=redefined-outer-name
+ forecast_request = ForecastTopologyCapacityRequest()
+ forecast_request.topology_id.CopyFrom(ADMIN_TOPOLOGY_ID) # pylint: disable=no-member
+ forecast_request.forecast_window_seconds = 10 * 24 * 60 * 60 # 10 days in seconds
+ forecast_reply = forecaster_client.ForecastTopologyCapacity(forecast_request)
+
+ topology = context_client.GetTopology(ADMIN_TOPOLOGY_ID)
+ assert len(forecast_reply.link_capacities) == len(topology.link_ids)
+ reply_link_uuid__to__link_capacity = {
+ link_capacity.link_id.link_uuid.uuid : link_capacity
+ for link_capacity in forecast_reply.link_capacities
+ }
+ for link_id in topology.link_ids:
+ link_uuid = link_id.link_uuid.uuid
+ assert link_uuid in reply_link_uuid__to__link_capacity
+ link_capacity_forecast = reply_link_uuid__to__link_capacity[link_uuid]
+ assert link_capacity_forecast.link_id == link_id
+ assert link_capacity_forecast.total_capacity_gbps >= link_capacity_forecast.current_used_capacity_gbps
+ # TODO: validate forecasted values; might be increasing or decreasing
+
+def test_cleanup_environment(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ scenario : Tuple[pandas.DataFrame, Dict] # pylint: disable=redefined-outer-name
+) -> None:
+ _, descriptors = scenario
+
+ # Verify the scenario has no services/slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 0
+ assert len(response.slice_ids) == 0
+
+ # Validate the base scenario and unload its descriptors
+ descriptor_loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
+ descriptor_loader.validate()
+ descriptor_loader.unload()
+ validate_empty_scenario(context_client)
diff --git a/src/monitoring/service/ManagementDBTools.py b/src/monitoring/service/ManagementDBTools.py
index 6c0a69e0ec6ec22a9fff24a1de073f2df03e2115..d2a63685f17b58c0f92acb4ef9606f3df929b9e0 100644
--- a/src/monitoring/service/ManagementDBTools.py
+++ b/src/monitoring/service/ManagementDBTools.py
@@ -36,12 +36,13 @@ class ManagementDB():
kpi_id INTEGER PRIMARY KEY AUTOINCREMENT,
kpi_description TEXT,
kpi_sample_type INTEGER,
- device_id INTEGER,
- endpoint_id INTEGER,
- service_id INTEGER,
- slice_id INTEGER,
- connection_id INTEGER,
- monitor_flag INTEGER
+ device_id STRING,
+ endpoint_id STRING,
+ service_id STRING,
+ slice_id STRING,
+ connection_id STRING,
+ link_id STRING,
+ monitor_flag STRING
);
""")
LOGGER.debug("KPI table created in the ManagementDB")
@@ -87,13 +88,13 @@ class ManagementDB():
LOGGER.debug(f"Alarm table cannot be created in the ManagementDB. {e}")
raise Exception
- def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id):
+ def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id,link_id):
try:
c = self.client.cursor()
- c.execute("SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ? AND slice_id is ? AND connection_id is ?",(device_id,kpi_sample_type,endpoint_id,service_id,slice_id,connection_id))
+ c.execute("SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ? AND slice_id is ? AND connection_id is ? AND link_id is ?",(device_id,kpi_sample_type,endpoint_id,service_id,slice_id,connection_id,link_id))
data=c.fetchone()
if data is None:
- c.execute("INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id) VALUES (?,?,?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id))
+ c.execute("INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id,link_id) VALUES (?,?,?,?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id,link_id))
self.client.commit()
kpi_id = c.lastrowid
LOGGER.debug(f"KPI {kpi_id} succesfully inserted in the ManagementDB")
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index fd9c092b2d061865cb8c3d625eef8b5d2ef0eab7..ad20b5afaa6d8510e0e39267ef6e1d71782e3e22 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -96,6 +96,7 @@ class MetricsDB():
'service_id SYMBOL,' \
'slice_id SYMBOL,' \
'connection_id SYMBOL,' \
+ 'link_id SYMBOL,' \
'timestamp TIMESTAMP,' \
'kpi_value DOUBLE)' \
'TIMESTAMP(timestamp);'
@@ -106,7 +107,7 @@ class MetricsDB():
LOGGER.debug(f"Table {self.table} cannot be created. {e}")
raise Exception
- def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, slice_id, connection_id, kpi_value):
+ def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, slice_id, connection_id, link_id, kpi_value):
device_name = self.name_mapping.get_device_name(device_id) or ''
endpoint_name = self.name_mapping.get_endpoint_name(endpoint_id) or ''
@@ -125,7 +126,9 @@ class MetricsDB():
'endpoint_name': endpoint_name,
'service_id': service_id,
'slice_id': slice_id,
- 'connection_id': connection_id,},
+ 'connection_id': connection_id,
+ 'link_id': link_id,
+ },
columns={
'kpi_value': kpi_value},
at=datetime.datetime.fromtimestamp(time))
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index 3bfef65ff0c52f110b9a091e96b6f6b97dfa79cf..608b0bad9d5869cde35be60157fec9e0a6d34c90 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -65,13 +65,14 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
kpi_service_id = request.service_id.service_uuid.uuid
kpi_slice_id = request.slice_id.slice_uuid.uuid
kpi_connection_id = request.connection_id.connection_uuid.uuid
+ kpi_link_id = request.link_id.link_uuid.uuid
if request.kpi_id.kpi_id.uuid != "":
response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
# Here the code to modify an existing kpi
else:
data = self.management_db.insert_KPI(
kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id,
- kpi_connection_id)
+ kpi_connection_id, kpi_link_id)
response.kpi_id.uuid = str(data)
return response
@@ -100,6 +101,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
kpiDescriptor.service_id.service_uuid.uuid = str(kpi_db[5])
kpiDescriptor.slice_id.slice_uuid.uuid = str(kpi_db[6])
kpiDescriptor.connection_id.connection_uuid.uuid = str(kpi_db[7])
+ kpiDescriptor.link_id.link_uuid.uuid = str(kpi_db[8])
return kpiDescriptor
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
@@ -117,6 +119,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
kpi_descriptor.service_id.service_uuid.uuid = str(item[5])
kpi_descriptor.slice_id.slice_uuid.uuid = str(item[6])
kpi_descriptor.connection_id.connection_uuid.uuid = str(item[7])
+ kpi_descriptor.link_id.link_uuid.uuid = str(item[8])
kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor)
return kpi_descriptor_list
@@ -135,11 +138,12 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
serviceId = kpiDescriptor.service_id.service_uuid.uuid
sliceId = kpiDescriptor.slice_id.slice_uuid.uuid
connectionId = kpiDescriptor.connection_id.connection_uuid.uuid
+ linkId = kpiDescriptor.link_id.link_uuid.uuid
time_stamp = request.timestamp.timestamp
kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
# Build the structure to be included as point in the MetricsDB
- self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, kpi_value)
+ self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, linkId, kpi_value)
return Empty()
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index ff19e231e1e6dfee78d5bc1ae71f170990d11609..f2c2215970545e1f6583598bdc5ef88299ba76d2 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -22,16 +22,18 @@ from apscheduler.executors.pool import ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.base import STATE_STOPPED
from grpc._channel import _MultiThreadedRendezvous
-from common.Constants import ServiceNameEnum
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, ServiceNameEnum
from common.Settings import (
ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
-from common.proto.context_pb2 import ConfigActionEnum, DeviceOperationalStatusEnum, EventTypeEnum, DeviceEvent, Device, Empty
+from common.proto.context_pb2 import ConfigActionEnum, Context, ContextId, DeviceOperationalStatusEnum, EventTypeEnum, DeviceEvent, Device, Empty, Topology, TopologyId
from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
from common.proto.kpi_sample_types_pb2 import KpiSampleType
from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, SubsDescriptor, SubsList, AlarmID, \
AlarmDescriptor, AlarmList, KpiDescriptorList, SubsResponse, AlarmResponse, RawKpiTable #, Kpi, KpiList
from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
from common.tools.object_factory.ConfigRule import json_config_rule_set
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Topology import json_topology, json_topology_id
from common.tools.service.GenericGrpcService import GenericGrpcService
from common.tools.timestamp.Converters import timestamp_utcnow_to_float #, timestamp_string_to_float
from context.client.ContextClient import ContextClient
@@ -224,6 +226,18 @@ def ingestion_data(kpi_id_int):
kpi_value)
sleep(0.1)
+##################################################
+# Prepare Environment, should be the first test
+##################################################
+
+def test_prepare_environment(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument
+):
+ context_id = json_context_id(DEFAULT_CONTEXT_NAME)
+ context_client.SetContext(Context(**json_context(DEFAULT_CONTEXT_NAME)))
+ context_client.SetTopology(Topology(**json_topology(DEFAULT_TOPOLOGY_NAME, context_id=context_id)))
+
+
###########################
# Tests Implementation
###########################
@@ -428,10 +442,11 @@ def test_managementdb_tools_kpis(management_db): # pylint: disable=redefined-out
kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid # pylint: disable=maybe-no-member
kpi_slice_id = _create_kpi_request.slice_id.slice_uuid.uuid # pylint: disable=maybe-no-member
kpi_connection_id = _create_kpi_request.connection_id.connection_uuid.uuid # pylint: disable=maybe-no-member
+ link_id = _create_kpi_request.link_id.link_uuid.uuid # pylint: disable=maybe-no-member
_kpi_id = management_db.insert_KPI(
kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id,
- kpi_slice_id, kpi_connection_id)
+ kpi_slice_id, kpi_connection_id, link_id)
assert isinstance(_kpi_id, int)
response = management_db.get_KPI(_kpi_id)
@@ -626,3 +641,14 @@ def test_listen_events(
events_collector.stop()
LOGGER.warning('test_listen_events end')
+
+
+##################################################
+# Cleanup Environment, should be the last test
+##################################################
+def test_cleanup_environment(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument
+):
+ context_id = json_context_id(DEFAULT_CONTEXT_NAME)
+ context_client.RemoveTopology(TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=context_id)))
+ context_client.RemoveContext(ContextId(**context_id))
diff --git a/src/pathcomp/.gitlab-ci.yml b/src/pathcomp/.gitlab-ci.yml
index 20ec4e728837b87e061b2ecffa4b7549c658258f..05113d0feab441543d6567f3eb3ab1cacac3a971 100644
--- a/src/pathcomp/.gitlab-ci.yml
+++ b/src/pathcomp/.gitlab-ci.yml
@@ -60,6 +60,7 @@ unit_test pathcomp-backend:
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend image is not in the system"; fi
- if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend image is not in the system"; fi
+ - docker container prune -f
script:
- docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
- docker ps -a
@@ -106,6 +107,7 @@ unit_test pathcomp-frontend:
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
- if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend image is not in the system"; fi
- if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend image is not in the system"; fi
+ - docker container prune -f
script:
- docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
- docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
@@ -131,7 +133,7 @@ unit_test pathcomp-frontend:
- docker logs ${IMAGE_NAME}-backend
- >
docker exec -i ${IMAGE_NAME}-frontend bash -c
- "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/frontend/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml"
+ "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_unitary.py $IMAGE_NAME/frontend/tests/test_unitary_pathcomp_forecaster.py"
- docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
diff --git a/src/pathcomp/frontend/Config.py b/src/pathcomp/frontend/Config.py
index 714eb7278074ac860caa76dc3ed8b4a40ae9f192..61aa31a8316a67cdba4b214fc0a1ff4b3843b003 100644
--- a/src/pathcomp/frontend/Config.py
+++ b/src/pathcomp/frontend/Config.py
@@ -13,6 +13,7 @@
# limitations under the License.
import os
+from common.Settings import get_setting
DEFAULT_PATHCOMP_BACKEND_SCHEME = 'http'
DEFAULT_PATHCOMP_BACKEND_HOST = '127.0.0.1'
@@ -37,3 +38,13 @@ PATHCOMP_BACKEND_PORT = int(os.environ.get('PATHCOMP_BACKEND_PORT', backend_port
BACKEND_URL = '{:s}://{:s}:{:d}{:s}'.format(
PATHCOMP_BACKEND_SCHEME, PATHCOMP_BACKEND_HOST, PATHCOMP_BACKEND_PORT, PATHCOMP_BACKEND_BASEURL)
+
+
+SETTING_NAME_ENABLE_FORECASTER = 'ENABLE_FORECASTER'
+TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'}
+
+def is_forecaster_enabled() -> bool:
+ is_enabled = get_setting(SETTING_NAME_ENABLE_FORECASTER, default=None)
+ if is_enabled is None: return False
+ str_is_enabled = str(is_enabled).upper()
+ return str_is_enabled in TRUE_VALUES
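+
+# Illustrative note (not part of the runtime logic above): setting the environment variable
+# ENABLE_FORECASTER to "YES" (or any value in TRUE_VALUES) makes is_forecaster_enabled() return True;
+# when the variable is unset or holds any other value, it returns False.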
diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile
index 08fe50e0f7443ad71ecabf6fdb337539cc07d203..955844cf4d80b39fc0913c9c523fd1267ca0fb1d 100644
--- a/src/pathcomp/frontend/Dockerfile
+++ b/src/pathcomp/frontend/Dockerfile
@@ -66,6 +66,9 @@ COPY src/context/__init__.py context/__init__.py
COPY src/context/client/. context/client/
COPY src/device/__init__.py device/__init__.py
COPY src/device/client/. device/client/
+COPY src/forecaster/. forecaster/
+COPY src/monitoring/__init__.py monitoring/__init__.py
+COPY src/monitoring/client/. monitoring/client/
COPY src/service/__init__.py service/__init__.py
COPY src/service/client/. service/client/
COPY src/slice/__init__.py slice/__init__.py
diff --git a/src/pathcomp/frontend/requirements.in b/src/pathcomp/frontend/requirements.in
index d99d4cd02b1a9fa39633b35d998b228b3b9e9fc7..c96d7425c5d2e32d43559b8b138de8200db40eac 100644
--- a/src/pathcomp/frontend/requirements.in
+++ b/src/pathcomp/frontend/requirements.in
@@ -13,4 +13,6 @@
# limitations under the License.
+pandas==1.5.*
requests==2.27.1
+scikit-learn==1.1.*
diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
index 784a09e32c2dbb6f6cfcbbbe51048e49ad9a7005..5d3d352d7ddf0638b3d9a3894eb1f9ac3f91c4fb 100644
--- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
+++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
@@ -13,18 +13,20 @@
# limitations under the License.
import grpc, logging, threading
-from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
+#from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.proto.context_pb2 import ContextId, Empty, TopologyId
+#from common.proto.context_pb2 import ContextId, Empty, TopologyId
from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer
-from common.tools.context_queries.Device import get_devices_in_topology
-from common.tools.context_queries.Link import get_links_in_topology
-from common.tools.context_queries.InterDomain import is_inter_domain
+#from common.tools.context_queries.Device import get_devices_in_topology
+#from common.tools.context_queries.Link import get_links_in_topology
+#from common.tools.context_queries.InterDomain import is_inter_domain
from common.tools.grpc.Tools import grpc_message_to_json_string
-from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Topology import json_topology_id
-from context.client.ContextClient import ContextClient
+from pathcomp.frontend.Config import is_forecaster_enabled
+#from common.tools.object_factory.Context import json_context_id
+#from common.tools.object_factory.Topology import json_topology_id
+#from context.client.ContextClient import ContextClient
+from pathcomp.frontend.service.TopologyTools import get_pathcomp_topology_details
from pathcomp.frontend.service.algorithms.Factory import get_algorithm
LOGGER = logging.getLogger(__name__)
@@ -43,9 +45,7 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
def Compute(self, request : PathCompRequest, context : grpc.ServicerContext) -> PathCompReply:
LOGGER.debug('[Compute] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
- context_client = ContextClient()
-
- context_id = json_context_id(DEFAULT_CONTEXT_NAME)
+ #context_client = ContextClient()
# TODO: improve definition of topologies; for interdomain the current topology design might be not convenient
#if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids):
# #devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
@@ -56,10 +56,11 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
# # TODO: add contexts, topologies, and membership of devices/links in topologies
# #devices = context_client.ListDevices(Empty())
# #links = context_client.ListLinks(Empty())
+ # context_id = json_context_id(DEFAULT_CONTEXT_NAME)
# topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id)
- topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id)
- topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id))
+ allow_forecasting = is_forecaster_enabled()
+ topology_details = get_pathcomp_topology_details(request, allow_forecasting=allow_forecasting)
algorithm = get_algorithm(request)
algorithm.add_devices(topology_details.devices)
diff --git a/src/pathcomp/frontend/service/TopologyTools.py b/src/pathcomp/frontend/service/TopologyTools.py
new file mode 100644
index 0000000000000000000000000000000000000000..778cd59acce1eeeeeb1b05bcc3a03f09a9a46a8e
--- /dev/null
+++ b/src/pathcomp/frontend/service/TopologyTools.py
@@ -0,0 +1,98 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, math
+from typing import Dict, Optional
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, ServiceNameEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_environment_variables, get_env_var_name
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException
+from common.proto.context_pb2 import Constraint_Schedule, Service, TopologyDetails
+from common.proto.forecaster_pb2 import ForecastLinkCapacityReply, ForecastTopologyCapacityRequest
+from common.proto.pathcomp_pb2 import PathCompRequest
+from common.tools.context_queries.Topology import get_topology_details
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from forecaster.client.ForecasterClient import ForecasterClient
+
+LOGGER = logging.getLogger(__name__)
+
+def get_service_schedule(service : Service) -> Optional[Constraint_Schedule]:
+ for constraint in service.service_constraints:
+ if constraint.WhichOneof('constraint') != 'schedule': continue
+ return constraint.schedule
+ return None
+
+def get_pathcomp_topology_details(request : PathCompRequest, allow_forecasting : bool = False) -> TopologyDetails:
+ context_client = ContextClient()
+ topology_details = get_topology_details(
+ context_client, DEFAULT_TOPOLOGY_NAME, context_uuid=DEFAULT_CONTEXT_NAME, rw_copy=True
+ )
+
+ if len(request.services) == 0:
+ raise InvalidArgumentException('services', grpc_message_to_json_string(request), 'must not be empty')
+
+ if not allow_forecasting:
+ LOGGER.warning('Forecaster is explicitly disabled')
+ return topology_details
+
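+ # The Forecaster is considered deployed only when both its HOST and PORT_GRPC environment variables are present.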
+ env_vars = find_environment_variables([
+ get_env_var_name(ServiceNameEnum.FORECASTER, ENVVAR_SUFIX_SERVICE_HOST ),
+ get_env_var_name(ServiceNameEnum.FORECASTER, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+ ])
+ if len(env_vars) != 2:
+ LOGGER.warning('Forecaster is not deployed')
+ return topology_details
+
+ if len(request.services) > 1:
+ LOGGER.warning('Forecaster does not support multiple services')
+ return topology_details
+
+ service = request.services[0]
+ service_schedule = get_service_schedule(service)
+ if service_schedule is None:
+ LOGGER.warning('Service provides no schedule constraint; forecast cannot be used')
+ return topology_details
+
+ #start_timestamp = service_schedule.start_timestamp
+ duration_days = service_schedule.duration_days
+ if float(duration_days) <= 1.e-12:
+ LOGGER.warning('Service schedule constraint does not define a duration; forecast cannot be used')
+ return topology_details
+
+ forecaster_client = ForecasterClient()
+ forecaster_client.connect()
+
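+ # Request per-link capacity forecasts for the whole topology over the service duration (days converted to seconds).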
+ forecast_request = ForecastTopologyCapacityRequest(
+ topology_id=topology_details.topology_id,
+ forecast_window_seconds = duration_days * 24 * 60 * 60
+ )
+
+ forecast_reply = forecaster_client.ForecastTopologyCapacity(forecast_request)
+
+ forecasted_link_capacities : Dict[str, ForecastLinkCapacityReply] = {
+ link_capacity.link_id.link_uuid.uuid : link_capacity
+ for link_capacity in forecast_reply.link_capacities
+ }
+
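+ # Overwrite each link's used capacity with its forecast; if the forecast exceeds the declared total, raise the total to the next multiple of 100 Gbps.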
+ for link in topology_details.links:
+ link_uuid = link.link_id.link_uuid.uuid
+ forecasted_link_capacity = forecasted_link_capacities.get(link_uuid)
+ if forecasted_link_capacity is None: continue
+ link.attributes.used_capacity_gbps = forecasted_link_capacity.forecast_used_capacity_gbps
+ if link.attributes.total_capacity_gbps < link.attributes.used_capacity_gbps:
+ total_capacity_gbps = link.attributes.used_capacity_gbps
+ total_capacity_gbps = math.ceil(total_capacity_gbps / 100) * 100 # round up in steps of 100
+ link.attributes.total_capacity_gbps = total_capacity_gbps
+
+ return topology_details
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index e2c6dc13804703d89242b27156763ce887aa4884..02765901ec1084e32fde440ff531f035249fc750 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -118,9 +118,25 @@ def compose_link(grpc_link : Link) -> Dict:
for link_endpoint_id in grpc_link.link_endpoint_ids
]
+ total_capacity_gbps, used_capacity_gbps = None, None
+ if grpc_link.HasField('attributes'):
+ attributes = grpc_link.attributes
+ # In proto3, HasField() does not work for scalar fields, using ListFields() instead.
+ attribute_names = set([field.name for field,_ in attributes.ListFields()])
+ if 'total_capacity_gbps' in attribute_names:
+ total_capacity_gbps = attributes.total_capacity_gbps
+ if 'used_capacity_gbps' in attribute_names:
+ used_capacity_gbps = attributes.used_capacity_gbps
+ elif total_capacity_gbps is not None:
+ used_capacity_gbps = total_capacity_gbps
+
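+ # Fallback when the Link carries no capacity attributes: assume a 100 Gbps link with no traffic.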
+ if total_capacity_gbps is None: total_capacity_gbps = 100
+ if used_capacity_gbps is None: used_capacity_gbps = 0
+ available_capacity_gbps = total_capacity_gbps - used_capacity_gbps
+
forwarding_direction = LinkForwardingDirection.UNIDIRECTIONAL.value
- total_potential_capacity = compose_capacity(200, CapacityUnit.MBPS.value)
- available_capacity = compose_capacity(200, CapacityUnit.MBPS.value)
+ total_potential_capacity = compose_capacity(total_capacity_gbps, CapacityUnit.GBPS.value)
+ available_capacity = compose_capacity(available_capacity_gbps, CapacityUnit.GBPS.value)
cost_characteristics = compose_cost_characteristics('linkcost', '1', '0')
latency_characteristics = compose_latency_characteristics('1')
diff --git a/src/pathcomp/frontend/tests/MockService_Dependencies.py b/src/pathcomp/frontend/tests/MockService_Dependencies.py
index e903bc0e028c7ef97f21d7422f37255574547338..858db17a9e35e30ea93c965815b39a068c696b4b 100644
--- a/src/pathcomp/frontend/tests/MockService_Dependencies.py
+++ b/src/pathcomp/frontend/tests/MockService_Dependencies.py
@@ -17,12 +17,15 @@ from typing import Union
from common.Constants import ServiceNameEnum
from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name
from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from common.proto.monitoring_pb2_grpc import add_MonitoringServiceServicer_to_server
from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.tests.MockServicerImpl_Monitoring import MockServicerImpl_Monitoring
from common.tools.service.GenericGrpcService import GenericGrpcService
LOCAL_HOST = '127.0.0.1'
-SERVICE_CONTEXT = ServiceNameEnum.CONTEXT
+SERVICE_CONTEXT = ServiceNameEnum.CONTEXT
+SERVICE_MONITORING = ServiceNameEnum.MONITORING
class MockService_Dependencies(GenericGrpcService):
# Mock Service implementing Context, Device, and Service to simplify unitary tests of PathComp
@@ -35,6 +38,12 @@ class MockService_Dependencies(GenericGrpcService):
self.context_servicer = MockServicerImpl_Context()
add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+ self.monitoring_servicer = MockServicerImpl_Monitoring()
+ add_MonitoringServiceServicer_to_server(self.monitoring_servicer, self.server)
+
def configure_env_vars(self):
os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address)
os.environ[get_env_var_name(SERVICE_CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
+
+ os.environ[get_env_var_name(SERVICE_MONITORING, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address)
+ os.environ[get_env_var_name(SERVICE_MONITORING, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
diff --git a/src/pathcomp/frontend/tests/PrepareTestScenario.py b/src/pathcomp/frontend/tests/PrepareTestScenario.py
index 387f6aedef1a88559f974a0b792ac1499d42a3f7..8cc06349b3dc61fc62a6711d4cb72c09e39c9a64 100644
--- a/src/pathcomp/frontend/tests/PrepareTestScenario.py
+++ b/src/pathcomp/frontend/tests/PrepareTestScenario.py
@@ -17,16 +17,24 @@ from common.Constants import ServiceNameEnum
from common.Settings import (
ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
from context.client.ContextClient import ContextClient
+from forecaster.client.ForecasterClient import ForecasterClient
+from forecaster.service.ForecasterService import ForecasterService
+from monitoring.client.MonitoringClient import MonitoringClient
from pathcomp.frontend.client.PathCompClient import PathCompClient
from pathcomp.frontend.service.PathCompService import PathCompService
-from pathcomp.frontend.tests.MockService_Dependencies import MockService_Dependencies
+from .MockService_Dependencies import MockService_Dependencies
LOCAL_HOST = '127.0.0.1'
MOCKSERVICE_PORT = 10000
-PATHCOMP_SERVICE_PORT = MOCKSERVICE_PORT + int(get_service_port_grpc(ServiceNameEnum.PATHCOMP)) # avoid privileged ports
+# avoid privileged ports
+PATHCOMP_SERVICE_PORT = MOCKSERVICE_PORT + int(get_service_port_grpc(ServiceNameEnum.PATHCOMP))
os.environ[get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST)
os.environ[get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(PATHCOMP_SERVICE_PORT)
+FORECASTER_SERVICE_PORT = MOCKSERVICE_PORT + int(get_service_port_grpc(ServiceNameEnum.FORECASTER))
+os.environ[get_env_var_name(ServiceNameEnum.FORECASTER, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.FORECASTER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(FORECASTER_SERVICE_PORT)
+
@pytest.fixture(scope='session')
def mock_service():
_service = MockService_Dependencies(MOCKSERVICE_PORT)
@@ -42,8 +50,33 @@ def context_client(mock_service : MockService_Dependencies): # pylint: disable=r
_client.close()
@pytest.fixture(scope='session')
-def pathcomp_service(context_client : ContextClient): # pylint: disable=redefined-outer-name
+def monitoring_client(mock_service : MockService_Dependencies): # pylint: disable=redefined-outer-name
+ _client = MonitoringClient()
+ yield _client
+ _client.close()
+@pytest.fixture(scope='session')
+def forecaster_service(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name
+):
+ _service = ForecasterService()
+ _service.start()
+ yield _service
+ _service.stop()
+
+@pytest.fixture(scope='session')
+def forecaster_client(forecaster_service : ForecasterService): # pylint: disable=redefined-outer-name
+ _client = ForecasterClient()
+ yield _client
+ _client.close()
+
+@pytest.fixture(scope='session')
+def pathcomp_service(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name
+ forecaster_client : ForecasterClient, # pylint: disable=redefined-outer-name
+):
_service = PathCompService()
_service.start()
yield _service
diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py
index f4e3cbf0f60285b960625a677854c4b7ab4decb9..4d5b3549ba52e3ef448a05c6d137f2a75531f3ea 100644
--- a/src/pathcomp/frontend/tests/test_unitary.py
+++ b/src/pathcomp/frontend/tests/test_unitary.py
@@ -56,7 +56,8 @@ os.environ['PATHCOMP_BACKEND_PORT'] = os.environ.get('PATHCOMP_BACKEND_PORT', ba
from .PrepareTestScenario import ( # pylint: disable=unused-import
# be careful, order of symbols is important here!
- mock_service, pathcomp_service, context_client, pathcomp_client)
+ mock_service, context_client, monitoring_client,
+ forecaster_service, forecaster_client, pathcomp_service, pathcomp_client)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
diff --git a/src/pathcomp/frontend/tests/test_unitary_pathcomp_forecaster.py b/src/pathcomp/frontend/tests/test_unitary_pathcomp_forecaster.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6d39d3177f1a3d2275a6b48d68478c0a37e9d6a
--- /dev/null
+++ b/src/pathcomp/frontend/tests/test_unitary_pathcomp_forecaster.py
@@ -0,0 +1,198 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, pandas, pytest
+from typing import Dict, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.proto.context_pb2 import ContextId, TopologyId
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.monitoring_pb2 import KpiDescriptor
+from common.proto.pathcomp_pb2 import PathCompRequest
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
+from common.tools.grpc.Tools import grpc_message_to_json
+from common.tools.object_factory.Constraint import (
+ json_constraint_schedule, json_constraint_sla_capacity, json_constraint_sla_latency)
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.EndPoint import json_endpoint_id
+from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
+from common.tools.timestamp.Converters import timestamp_utcnow_to_float
+from context.client.ContextClient import ContextClient
+from forecaster.tests.Tools import compose_descriptors, read_csv
+from monitoring.client.MonitoringClient import MonitoringClient
+from pathcomp.frontend.client.PathCompClient import PathCompClient
+from .MockService_Dependencies import MockService_Dependencies
+
+# Configure the PathComp backend environment variables before the fixtures overwrite them, so the tests can reach a real PathComp backend.
+DEFAULT_PATHCOMP_BACKEND_SCHEME = 'http'
+DEFAULT_PATHCOMP_BACKEND_HOST = '127.0.0.1'
+DEFAULT_PATHCOMP_BACKEND_PORT = '8081'
+DEFAULT_PATHCOMP_BACKEND_BASEURL = '/pathComp/api/v1/compRoute'
+
+os.environ['PATHCOMP_BACKEND_SCHEME'] = os.environ.get('PATHCOMP_BACKEND_SCHEME', DEFAULT_PATHCOMP_BACKEND_SCHEME)
+os.environ['PATHCOMP_BACKEND_BASEURL'] = os.environ.get('PATHCOMP_BACKEND_BASEURL', DEFAULT_PATHCOMP_BACKEND_BASEURL)
+
+# Find IP:port of backend container as follows:
+# - first check env vars PATHCOMP_BACKEND_HOST & PATHCOMP_BACKEND_PORT
+# - if not set, check env vars PATHCOMPSERVICE_SERVICE_HOST & PATHCOMPSERVICE_SERVICE_PORT_HTTP
+# - if not set, use DEFAULT_PATHCOMP_BACKEND_HOST & DEFAULT_PATHCOMP_BACKEND_PORT
+backend_host = DEFAULT_PATHCOMP_BACKEND_HOST
+backend_host = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', backend_host)
+os.environ['PATHCOMP_BACKEND_HOST'] = os.environ.get('PATHCOMP_BACKEND_HOST', backend_host)
+
+backend_port = DEFAULT_PATHCOMP_BACKEND_PORT
+backend_port = os.environ.get('PATHCOMPSERVICE_SERVICE_PORT_HTTP', backend_port)
+os.environ['PATHCOMP_BACKEND_PORT'] = os.environ.get('PATHCOMP_BACKEND_PORT', backend_port)
+
+from .PrepareTestScenario import ( # pylint: disable=unused-import
+ # be careful, order of symbols is important here!
+ mock_service, context_client, monitoring_client,
+ forecaster_service, forecaster_client, pathcomp_service, pathcomp_client)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+logging.getLogger('common.tests.InMemoryObjectDatabase').setLevel(logging.INFO)
+logging.getLogger('common.tests.InMemoryTimeSeriesDatabase').setLevel(logging.INFO)
+logging.getLogger('common.tests.MockServicerImpl_Context').setLevel(logging.INFO)
+logging.getLogger('common.tests.MockServicerImpl_Monitoring').setLevel(logging.INFO)
+logging.getLogger('context.client.ContextClient').setLevel(logging.INFO)
+logging.getLogger('monitoring.client.MonitoringClient').setLevel(logging.INFO)
+
+JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID)
+ADMIN_TOPOLOGY_ID = TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=JSON_ADMIN_CONTEXT_ID))
+
+CSV_DATA_FILE = 'forecaster/tests/data/dataset.csv'
+#DESC_DATS_FILE = 'forecaster/tests/data/descriptor.json'
+
+@pytest.fixture(scope='session')
+def scenario() -> Tuple[pandas.DataFrame, Dict]:
+ df = read_csv(CSV_DATA_FILE)
+ descriptors = compose_descriptors(df, num_client_endpoints=5)
+ #with open(DESC_DATS_FILE, 'w', encoding='UTF-8') as f:
+ # f.write(json.dumps(descriptors))
+ yield df, descriptors
+
+def test_prepare_environment(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name
+ mock_service : MockService_Dependencies, # pylint: disable=redefined-outer-name
+ scenario : Tuple[pandas.DataFrame, Dict] # pylint: disable=redefined-outer-name
+) -> None:
+ df, descriptors = scenario
+
+ validate_empty_scenario(context_client)
+ descriptor_loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
+ results = descriptor_loader.process()
+ check_descriptor_load_results(results, descriptor_loader)
+ descriptor_loader.validate()
+
+ # Verify the scenario has no services/slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 0
+ assert len(response.slice_ids) == 0
+
+ for link in descriptors['links']:
+ link_uuid = link['link_id']['link_uuid']['uuid']
+ kpi_descriptor = KpiDescriptor()
+ kpi_descriptor.kpi_id.kpi_id.uuid = link_uuid # pylint: disable=no-member
+ kpi_descriptor.kpi_description = 'Used Capacity in Link: {:s}'.format(link_uuid)
+ kpi_descriptor.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS
+ kpi_descriptor.link_id.link_uuid.uuid = link_uuid # pylint: disable=no-member
+ monitoring_client.SetKpi(kpi_descriptor)
+
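+ # Feed the CSV samples into the mock Monitoring servicer's in-memory time-series DB, renaming columns to the KPI schema.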
+ mock_service.monitoring_servicer.ts_db._data = df.rename(columns={
+ 'link_id': 'kpi_uuid',
+ 'used_capacity_gbps': 'value'
+ })
+
+def test_request_service_shortestpath_forecast(
+ pathcomp_client : PathCompClient, # pylint: disable=redefined-outer-name
+) -> None:
+
+ start_timestamp = timestamp_utcnow_to_float()
+ duration_days = 1.5
+
+ endpoint_id_a = json_endpoint_id(json_device_id('pt1.pt'), 'client:1')
+ endpoint_id_z = json_endpoint_id(json_device_id('gr1.gr'), 'client:3')
+ context_uuid = DEFAULT_CONTEXT_NAME
+ service_uuid = get_service_uuid(endpoint_id_a, endpoint_id_z)
+ request_service = json_service_l3nm_planned(
+ service_uuid,
+ context_uuid=context_uuid,
+ endpoint_ids=[endpoint_id_a, endpoint_id_z],
+ constraints=[
+ json_constraint_sla_capacity(25.0),
+ json_constraint_sla_latency(20.0),
+ json_constraint_schedule(start_timestamp, duration_days),
+ ]
+ )
+
+ pathcomp_request = PathCompRequest(services=[request_service])
+ pathcomp_request.shortest_path.Clear() # hack to select the shortest path algorithm that has no attributes
+
+ pathcomp_reply = pathcomp_client.Compute(pathcomp_request)
+
+ pathcomp_reply = grpc_message_to_json(pathcomp_reply)
+ reply_services = pathcomp_reply['services']
+ reply_connections = pathcomp_reply['connections']
+ assert len(reply_services) >= 1
+ reply_service_ids = {
+ '{:s}/{:s}'.format(
+ svc['service_id']['context_id']['context_uuid']['uuid'],
+ svc['service_id']['service_uuid']['uuid']
+ )
+ for svc in reply_services
+ }
+ # Assert the requested service has a reply.
+ # Additional services that were not requested (i.e., sub-services) are permitted.
+ context_service_uuid = '{:s}/{:s}'.format(context_uuid, service_uuid)
+ assert context_service_uuid in reply_service_ids
+
+ reply_connection_service_ids = {
+ '{:s}/{:s}'.format(
+ conn['service_id']['context_id']['context_uuid']['uuid'],
+ conn['service_id']['service_uuid']['uuid']
+ )
+ for conn in reply_connections
+ }
+ # Assert the requested service has an associated connection.
+ # Additional connections that were not requested (i.e., connections for sub-services) are permitted.
+ assert context_service_uuid in reply_connection_service_ids
+
+ # TODO: implement other checks. examples:
+ # - request service and reply service endpoints match
+ # - request service and reply connection endpoints match
+ # - reply sub-service and reply sub-connection endpoints match
+ # - others?
+ #for json_service,json_connection in zip(json_services, json_connections):
+
+def test_cleanup_environment(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ scenario : Tuple[pandas.DataFrame, Dict] # pylint: disable=redefined-outer-name
+) -> None:
+ _, descriptors = scenario
+
+ # Verify the scenario has no services/slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 0
+ assert len(response.slice_ids) == 0
+
+ # Validate the base scenario and unload its descriptors
+ descriptor_loader = DescriptorLoader(descriptors=descriptors, context_client=context_client)
+ descriptor_loader.validate()
+ descriptor_loader.unload()
+ validate_empty_scenario(context_client)
diff --git a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
index e7fb00029f15d82dbe80c8fff13d098ca5b29f30..4f0c600923b6004d3e4e260d9dad973d1396830c 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
@@ -1295,6 +1295,10 @@ public class Serializer {
return KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED;
case BYTES_RECEIVED:
return KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED;
+ case LINK_TOTAL_CAPACITY_GBPS:
+ return KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS;
+ case LINK_USED_CAPACITY_GBPS:
+ return KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS;
case UNKNOWN:
return KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_UNKNOWN;
default:
@@ -1312,6 +1316,10 @@ public class Serializer {
return KpiSampleType.BYTES_TRANSMITTED;
case KPISAMPLETYPE_BYTES_RECEIVED:
return KpiSampleType.BYTES_RECEIVED;
+ case KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS:
+ return KpiSampleType.LINK_TOTAL_CAPACITY_GBPS;
+ case KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS:
+ return KpiSampleType.LINK_USED_CAPACITY_GBPS;
case KPISAMPLETYPE_UNKNOWN:
default:
return KpiSampleType.UNKNOWN;
diff --git a/src/policy/src/main/java/eu/teraflow/policy/kpi_sample_types/model/KpiSampleType.java b/src/policy/src/main/java/eu/teraflow/policy/kpi_sample_types/model/KpiSampleType.java
index 38257967703bee1d49ae8b1fc7ef11906690b1aa..12551339d9ac5a11e32fbb48871f4c67e0c4700f 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/kpi_sample_types/model/KpiSampleType.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/kpi_sample_types/model/KpiSampleType.java
@@ -21,5 +21,7 @@ public enum KpiSampleType {
PACKETS_TRANSMITTED,
PACKETS_RECEIVED,
BYTES_TRANSMITTED,
- BYTES_RECEIVED
+ BYTES_RECEIVED,
+ LINK_TOTAL_CAPACITY_GBPS,
+ LINK_USED_CAPACITY_GBPS
}
diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
index fb60ef8d1a82417f858fe63845b76b27099f488e..b57bdf10af1bbbfda187e89d7cb3d7951b200db6 100644
--- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
+++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
@@ -2218,6 +2218,12 @@ class SerializerTest {
Arguments.of(
KpiSampleType.BYTES_RECEIVED,
KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED),
+ Arguments.of(
+ KpiSampleType.LINK_TOTAL_CAPACITY_GBPS,
+ KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS),
+ Arguments.of(
+ KpiSampleType.LINK_USED_CAPACITY_GBPS,
+ KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS),
Arguments.of(KpiSampleType.UNKNOWN, KpiSampleTypes.KpiSampleType.KPISAMPLETYPE_UNKNOWN));
}
diff --git a/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java b/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java
index 217672b2e8de2d7c840833a937b0fb04c38a221b..98bdbbd2c364953df27694a839eff3e8f0e1c114 100644
--- a/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java
+++ b/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java
@@ -47,6 +47,14 @@ public final class KpiSampleTypes {
* KPISAMPLETYPE_BYTES_DROPPED = 203;
*/
KPISAMPLETYPE_BYTES_DROPPED(203),
+ /**
+ * KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS = 301;
+ */
+ KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS(301),
+ /**
+ * KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS = 302;
+ */
+ KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS(302),
/**
*
 * . can be used by both optical and L3 without any issue
@@ -118,6 +126,14 @@ public final class KpiSampleTypes {
 * KPISAMPLETYPE_BYTES_DROPPED = 203;
 */
 public static final int KPISAMPLETYPE_BYTES_DROPPED_VALUE = 203;
+ /**
+ * KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS = 301;
+ */
+ public static final int KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS_VALUE = 301;
+ /**
+ * KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS = 302;
+ */
+ public static final int KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS_VALUE = 302;
 /**
 *
 * . can be used by both optical and L3 without any issue
@@ -191,6 +207,8 @@ public final class KpiSampleTypes {
 case 201: return KPISAMPLETYPE_BYTES_TRANSMITTED;
 case 202: return KPISAMPLETYPE_BYTES_RECEIVED;
 case 203: return KPISAMPLETYPE_BYTES_DROPPED;
+ case 301: return KPISAMPLETYPE_LINK_TOTAL_CAPACITY_GBPS;
+ case 302: return KPISAMPLETYPE_LINK_USED_CAPACITY_GBPS;
 case 401: return KPISAMPLETYPE_ML_CONFIDENCE;
 case 501: return KPISAMPLETYPE_OPTICAL_SECURITY_STATUS;
 case 601: return KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS;
diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html
index 8ca7faee3e1871d11b819c6ca95668e654041f8c..864d0cdb20b8098a100c9c5f32ca637c20af9aac 100644
--- a/src/webui/service/templates/link/detail.html
+++ b/src/webui/service/templates/link/detail.html
@@ -79,6 +79,29 @@
+ <b>Attributes:</b>
+ <table>
+ <thead>
+ <tr>
+ <th>Key</th>
+ <th>Value</th>
+ </tr>
+ </thead>
+ <tbody>
+ {% for field_descriptor, field_value in link.attributes.ListFields() %}
+ <tr>
+ <td>{{ field_descriptor.name }}</td>
+ <td>{{ field_value }}</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>