Commit 8d581e12 authored by Lluis Gifre Renom

Forecaster component:

- Corrected unitary tests
- Added info log messages in tests
- Activated live log in test scripts
parent ea3e90c3
2 merge requests: !235 Release TeraFlowSDN 3.0, !160 Resolve "(CTTC) Forecaster component"
@@ -20,5 +20,6 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc

 # Run unitary tests and analyze coverage of code at same time
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=DEBUG -o log_cli=true --verbose \
     forecaster/tests/test_unitary.py
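For reference (not part of the commit), the same live-log options the script now passes can be applied programmatically; a minimal sketch, assuming pytest is installed and the test file is reachable from the working directory:

    import pytest

    # Mirror the script's flags: DEBUG-level capture, live log streaming, verbose output.
    raise SystemExit(pytest.main([
        '--log-level=DEBUG',
        '-o', 'log_cli=true',
        '--verbose',
        'forecaster/tests/test_unitary.py',
    ]))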
@@ -12,17 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import calendar, math, pandas
+import calendar, logging, math, pandas
 from datetime import datetime, timezone
 from typing import Dict

 from common.tools.object_factory.Context import json_context
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id
 )
-from common.tools.object_factory.EndPoint import json_endpoint_descriptor, json_endpoint_id
+from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_descriptor, json_endpoint_id
 from common.tools.object_factory.Link import json_link
 from common.tools.object_factory.Topology import json_topology

+LOGGER = logging.getLogger(__name__)
+
 def time_datetime_to_int(dt_time : datetime) -> int:
     return int(calendar.timegm(dt_time.timetuple()))
@@ -36,8 +38,13 @@ def time_utc_now_to_float() -> float:
     return time_datetime_to_float(time_utc_now_to_datetime())

 def read_csv(csv_file : str) -> pandas.DataFrame:
+    LOGGER.info('Using Data File "{:s}"...'.format(csv_file))
+
+    LOGGER.info('Loading...')
     df = pandas.read_csv(csv_file)
+    LOGGER.info(' DONE')
+
+    LOGGER.info('Parsing and Adapting columns...')
     if 'dataset.csv' in csv_file:
         df.rename(columns={'linkid': 'link_id', 'ds': 'timestamp', 'y': 'used_capacity_gbps'}, inplace=True)
         df[['source', 'destination']] = df['link_id'].str.split('_', expand=True)
@@ -46,18 +53,26 @@ def read_csv(csv_file : str) -> pandas.DataFrame:
         df.rename(columns={
             'target': 'destination', 'id': 'link_id', 'ds': 'timestamp', 'demandValue': 'used_capacity_gbps'
         }, inplace=True)
+    LOGGER.info(' DONE')
+
+    LOGGER.info('Updating timestamps...')
     df['timestamp'] = pandas.to_datetime(df['timestamp'])
     max_timestamp = time_datetime_to_int(df['timestamp'].max())
     now_timestamp = time_datetime_to_int(datetime.now(tz=timezone.utc))
     df['timestamp'] = df['timestamp'] + pandas.offsets.Second(now_timestamp - max_timestamp)
+    LOGGER.info(' DONE')
+
+    LOGGER.info('Sorting...')
     df.sort_values('timestamp', ascending=True, inplace=True)
+    LOGGER.info(' DONE')

     return df
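The timestamp update above realigns the dataset so its newest sample lands at "now" (UTC) while preserving the spacing between samples; a toy illustration (a sketch with hypothetical data, not part of the commit):

    import calendar
    from datetime import datetime, timezone
    import pandas

    df = pandas.DataFrame({'timestamp': ['2020-01-01 00:00', '2020-01-01 00:15']})
    df['timestamp'] = pandas.to_datetime(df['timestamp'])
    max_ts = int(calendar.timegm(df['timestamp'].max().timetuple()))
    now_ts = int(calendar.timegm(datetime.now(tz=timezone.utc).timetuple()))
    df['timestamp'] = df['timestamp'] + pandas.offsets.Second(now_ts - max_ts)
    # The newest row now sits at ~now; the 15-minute gap between rows is unchanged.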
 def compose_descriptors(df : pandas.DataFrame) -> Dict:
     devices = dict()
     links = dict()

+    LOGGER.info('Discovering Devices and Links...')
     #df1.groupby(['A','B']).size().reset_index().rename(columns={0:'count'})
     df_links = df[['link_id', 'source', 'destination']].drop_duplicates()
     for row in df_links.itertuples(index=False):
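The discovery step deduplicates the (link_id, source, destination) triples and iterates them without the index; a self-contained sketch with toy data (not part of the commit):

    import pandas

    df = pandas.DataFrame({
        'link_id'    : ['A_B', 'A_B', 'B_C'],
        'source'     : ['A',   'A',   'B'],
        'destination': ['B',   'B',   'C'],
    })
    df_links = df[['link_id', 'source', 'destination']].drop_duplicates()
    for row in df_links.itertuples(index=False):
        print(row.link_id, row.source, row.destination)   # -> A_B A B, then B_C B C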
@@ -86,18 +101,21 @@ def compose_descriptors(df : pandas.DataFrame) -> Dict:
             'dst_dev': dst_device_uuid, 'dst_port': src_device_uuid,
             'total_capacity_gbps': total_capacity_gbps, 'used_capacity_gbps': used_capacity_gbps,
         }
+    LOGGER.info(' Found {:d} devices and {:d} links...'.format(len(devices), len(links)))

+    LOGGER.info('Composing Descriptors...')
     _context = json_context('admin', name='admin')
     _topology = json_topology('admin', name='admin', context_id=_context['context_id'])
     descriptor = {
+        'dummy_mode': True, # inject the descriptors directly into the Context component
         'contexts': [_context],
         'topologies': [_topology],
         'devices': [
             json_device_emulated_packet_router_disabled(
-                device_uuid, name=device_uuid, config_rules=json_device_emulated_connect_rules([
-                    json_endpoint_descriptor(endpoint_uuid, 'copper', endpoint_name=endpoint_uuid)
+                device_uuid, name=device_uuid, endpoints=[
+                    json_endpoint(json_device_id(device_uuid), endpoint_uuid, 'copper')
                     for endpoint_uuid in device_data['endpoints']
-                ]))
+                ], config_rules=json_device_emulated_connect_rules([]))
             for device_uuid,device_data in devices.items()
         ],
         'links': [
@@ -109,4 +127,5 @@ def compose_descriptors(df : pandas.DataFrame) -> Dict:
             for link_uuid,link_data in links.items()
         ],
     }
+    LOGGER.info(' DONE')

     return descriptor
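With 'dummy_mode': True the composed descriptor is meant to be injected directly into the Context component; to inspect it offline, it can simply be dumped to JSON. A minimal sketch (the file name and the direct helper calls are illustrative, not part of the commit):

    import json

    descriptor = compose_descriptors(read_csv('dataset.csv'))
    with open('descriptor.json', 'w', encoding='utf-8') as f:
        json.dump(descriptor, f, indent=2, default=str)   # default=str covers datetime-like values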