From 940abeed8f4aa12356ec09535bd5e3d379e5002f Mon Sep 17 00:00:00 2001
From: Panagiotis Famelis
Date: Tue, 14 Feb 2023 11:48:14 +0200
Subject: [PATCH 01/43] fix: removed assertions in scripts as they are broken
 (for now)

---
 src/tests/p4/tests/test_functional_bootstrap.py      |  2 --
 src/tests/p4/tests/test_functional_cleanup.py        |  3 ---
 src/tests/p4/tests/test_functional_create_service.py | 11 +----------
 3 files changed, 1 insertion(+), 15 deletions(-)

diff --git a/src/tests/p4/tests/test_functional_bootstrap.py b/src/tests/p4/tests/test_functional_bootstrap.py
index 5e39490f2..972692173 100644
--- a/src/tests/p4/tests/test_functional_bootstrap.py
+++ b/src/tests/p4/tests/test_functional_bootstrap.py
@@ -106,8 +106,6 @@ def test_devices_bootstraping(
         link_uuid = link['link_id']['link_uuid']['uuid']
         LOGGER.info('Adding Link {:s}'.format(link_uuid))
         response = context_client.SetLink(Link(**link))
-        assert response.name == link_uuid
-        context_client.SetLink(Link(**link))
 
 def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure devices are created -----------------------------------------------------------------
diff --git a/src/tests/p4/tests/test_functional_cleanup.py b/src/tests/p4/tests/test_functional_cleanup.py
index 852f2a655..aad56a210 100644
--- a/src/tests/p4/tests/test_functional_cleanup.py
+++ b/src/tests/p4/tests/test_functional_cleanup.py
@@ -58,7 +58,6 @@ def test_scenario_cleanup(
         device_uuid = device_id['device_uuid']['uuid']
         LOGGER.info('Deleting Device {:s}'.format(device_uuid))
         device_client.DeleteDevice(DeviceId(**device_id))
-        #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
 
     response = context_client.ListDevices(Empty())
     assert len(response.devices) == 0
@@ -72,7 +71,6 @@ def test_scenario_cleanup(
         LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
         context_client.RemoveTopology(TopologyId(**topology_id))
         context_id = json_context_id(context_uuid)
-        #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
 
     # ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
     for context in CONTEXTS:
@@ -80,4 +78,3 @@ def test_scenario_cleanup(
         context_uuid = context_id['context_uuid']['uuid']
         LOGGER.info('Deleting Context {:s}'.format(context_uuid))
         context_client.RemoveContext(ContextId(**context_id))
-        #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
diff --git a/src/tests/p4/tests/test_functional_create_service.py b/src/tests/p4/tests/test_functional_create_service.py
index beaa23ba3..76a681eea 100644
--- a/src/tests/p4/tests/test_functional_create_service.py
+++ b/src/tests/p4/tests/test_functional_create_service.py
@@ -54,15 +54,6 @@ def service_client():
 def test_rules_entry(
     context_client : ContextClient, device_client : DeviceClient, service_client : ServiceClient):  # pylint: disable=redefined-outer-name
-
-
-#    for device, _, __ in DEVICES:
-#        # Enable device
-#        device_p4_with_operational_status = copy.deepcopy(device)
-#        device_p4_with_operational_status['device_operational_status'] = \
-#            DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-#        device_client.ConfigureDevice(Device(**device_p4_with_operational_status))
-
     # ----- Create Services ---------------------------------------------------------------
     for service, endpoints in SERVICES:
         # Insert Service (table entries)
 
@@ -71,4 +62,4 @@ def test_rules_entry(
         service_p4 = copy.deepcopy(service)
         service_client.CreateService(Service(**service_p4))
         service_p4['service_endpoint_ids'].extend(endpoints)
-        service_client.UpdateService(Service(**service_p4))
\ No newline at end of file
+        service_client.UpdateService(Service(**service_p4))
-- 
GitLab


From 4fed2234918f1cf6308484ccc46fde9c9aa52645 Mon Sep 17 00:00:00 2001
From: Lluis Gifre Renom
Date: Thu, 16 Feb 2023 15:01:36 +0000
Subject: [PATCH 02/43] Monitoring component: - Added wait for device component

---
 src/monitoring/service/__main__.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/monitoring/service/__main__.py b/src/monitoring/service/__main__.py
index fc460151b..14f560960 100644
--- a/src/monitoring/service/__main__.py
+++ b/src/monitoring/service/__main__.py
@@ -69,6 +69,8 @@ def main():
     wait_for_environment_variables([
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     signal.signal(signal.SIGINT, signal_handler)
-- 
GitLab


From bb094bf1bca8853fa691e439e14af535ddc7fdd1 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Thu, 16 Feb 2023 17:15:15 +0000
Subject: [PATCH 03/43] Common - Context Queries: - Added method to get Context
 - Added method to get Link

---
 src/common/tools/context_queries/Context.py | 18 +++++++++++++++++-
 src/common/tools/context_queries/Link.py    | 19 +++++++++++++++++--
 2 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/src/common/tools/context_queries/Context.py b/src/common/tools/context_queries/Context.py
index d28ca3991..a627b9ba5 100644
--- a/src/common/tools/context_queries/Context.py
+++ b/src/common/tools/context_queries/Context.py
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.proto.context_pb2 import Context, Empty
+import grpc
+from typing import Optional
+from common.proto.context_pb2 import Context, ContextId, Empty
 from common.tools.object_factory.Context import json_context
 from context.client.ContextClient import ContextClient
 
@@ -23,3 +25,17 @@ def create_context(
     existing_context_uuids = {context_id.context_uuid.uuid for context_id in existing_context_ids.context_ids}
     if context_uuid in existing_context_uuids: return
     context_client.SetContext(Context(**json_context(context_uuid)))
+
+def get_context(context_client : ContextClient, context_uuid : str, rw_copy : bool = False) -> Optional[Context]:
+    try:
+        # pylint: disable=no-member
+        context_id = ContextId()
+        context_id.context_uuid.uuid = context_uuid
+        ro_context = context_client.GetContext(context_id)
+        if not rw_copy: return ro_context
+        rw_context = Context()
+        rw_context.CopyFrom(ro_context)
+        return rw_context
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get Context({:s})'.format(str(context_uuid)))
+        return None
diff --git a/src/common/tools/context_queries/Link.py b/src/common/tools/context_queries/Link.py
index 83a878bde..291cdcf37 100644
--- a/src/common/tools/context_queries/Link.py
+++ b/src/common/tools/context_queries/Link.py
@@ -12,11 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Set
-from common.proto.context_pb2 import ContextId, Empty, Link, Topology, TopologyId
+import grpc
+from typing import List, Optional, Set
+from common.proto.context_pb2 import ContextId, Empty, Link, LinkId, Topology, TopologyId
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 
+def get_link(context_client : ContextClient, link_uuid : str, rw_copy : bool = False) -> Optional[Link]:
+    try:
+        # pylint: disable=no-member
+        link_id = LinkId()
+        link_id.link_uuid.uuid = link_uuid
+        ro_link = context_client.GetLink(link_id)
+        if not rw_copy: return ro_link
+        rw_link = Link()
+        rw_link.CopyFrom(ro_link)
+        return rw_link
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get Link({:s})'.format(str(link_uuid)))
+        return None
+
 def get_existing_link_uuids(context_client : ContextClient) -> Set[str]:
     existing_link_ids = context_client.ListLinkIds(Empty())
     existing_link_uuids = {link_id.link_uuid.uuid for link_id in existing_link_ids.link_ids}
-- 
GitLab


From 36b2d7923dc017ad0269050fbedfbb04fbce3a72 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Thu, 16 Feb 2023 17:15:52 +0000
Subject: [PATCH 04/43] Common - MutexQueues: - Added safety control

---
 src/common/tools/mutex_queues/MutexQueues.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/common/tools/mutex_queues/MutexQueues.py b/src/common/tools/mutex_queues/MutexQueues.py
index b9fc567d5..96e22a86f 100644
--- a/src/common/tools/mutex_queues/MutexQueues.py
+++ b/src/common/tools/mutex_queues/MutexQueues.py
@@ -35,7 +35,7 @@
 #             self.mutex_queues.signal_done(device_uuid)
 
 import threading
-from queue import Queue
+from queue import Queue, Empty
 from typing import Dict
 
 class MutexQueues:
@@ -67,8 +67,11 @@ class MutexQueues:
         with self.lock:
             queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
 
-            # remove muself from the queue
-            queue.get_nowait()
+            # remove myself from the queue
+            try:
+                queue.get(block=True, timeout=0.1)
+            except Empty:
+                pass
 
             # if there are no other tasks queued, return
             if queue.qsize() == 0: return
-- 
GitLab


From 24776f2c27ba154308d72dd47932a8c2b84cb854 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Fri, 17 Feb 2023 08:55:17 +0000
Subject: [PATCH 05/43] Device component - Emulated Driver: - Reduced log level
 in some messages

---
 .../service/drivers/emulated/SyntheticSamplingParameters.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
index ea5cf2cb7..5bbbf89e8 100644
--- a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
+++ b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
@@ -51,7 +51,7 @@
             metric = match.group(2)
             metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '')
 
-            LOGGER.info(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense))
+            LOGGER.debug(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense))
 
             parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense)
             parameters = self.__data.get(parameters_key)
-- 
GitLab


From 4d3315aa12a5139ac12587006e65cf2037e5ff6d Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Fri, 17 Feb 2023 10:07:09 +0000
Subject: [PATCH 06/43] WebUI component: - added validation that entities exist
 - added missing constraint rendering statements - minor template improvements -
code cleanup --- src/webui/service/__init__.py | 1 + src/webui/service/device/forms.py | 13 +- src/webui/service/device/routes.py | 106 +++++++-------- src/webui/service/link/routes.py | 46 ++++--- src/webui/service/service/routes.py | 98 +++++++------- src/webui/service/slice/routes.py | 122 +++++++++--------- src/webui/service/templates/base.html | 2 +- .../service/templates/device/detail.html | 5 +- .../service/templates/service/detail.html | 83 +++++++----- src/webui/service/templates/slice/detail.html | 69 ++++++++-- 10 files changed, 314 insertions(+), 231 deletions(-) diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index d5b40b486..ef5253b87 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -96,6 +96,7 @@ def create_app(use_config=None, web_app_root=None): app.register_blueprint(link) app.jinja_env.globals.update({ # pylint: disable=no-member + 'enumerate' : enumerate, 'json_to_list' : json_to_list, 'get_working_context' : get_working_context, 'get_working_topology': get_working_topology, diff --git a/src/webui/service/device/forms.py b/src/webui/service/device/forms.py index e496c4d43..c6bacac9b 100644 --- a/src/webui/service/device/forms.py +++ b/src/webui/service/device/forms.py @@ -12,21 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# external imports from flask_wtf import FlaskForm -from wtforms import StringField, SelectField, TextAreaField, SubmitField, BooleanField, Form -from wtforms.validators import DataRequired, Length, NumberRange, Regexp, ValidationError +from wtforms import StringField, SelectField, TextAreaField, SubmitField, BooleanField +from wtforms.validators import DataRequired, Length, NumberRange, ValidationError from common.proto.context_pb2 import DeviceOperationalStatusEnum -from webui.utils.form_validators import key_value_validator class AddDeviceForm(FlaskForm): device_id = StringField('ID', validators=[DataRequired(), Length(min=5)]) - device_type = SelectField('Type', choices = []) - operational_status = SelectField('Operational Status', - # choices=[(-1, 'Select...'), (0, 'Undefined'), (1, 'Disabled'), (2, 'Enabled')], - coerce=int, - validators=[NumberRange(min=0)]) + device_type = SelectField('Type') + operational_status = SelectField('Operational Status', coerce=int, validators=[NumberRange(min=0)]) device_drivers_undefined = BooleanField('UNDEFINED / EMULATED') device_drivers_openconfig = BooleanField('OPENCONFIG') device_drivers_transport_api = BooleanField('TRANSPORT_API') diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py index ce3edcfda..ebf77a35f 100644 --- a/src/webui/service/device/routes.py +++ b/src/webui/service/device/routes.py @@ -14,16 +14,14 @@ import json from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for +from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( - ConfigActionEnum, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, Empty, TopologyId) -from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Topology import json_topology_id + ConfigActionEnum, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, Empty) +from common.tools.context_queries.Device import get_device +from common.tools.context_queries.Topology import get_topology from context.client.ContextClient import ContextClient from 
device.client.DeviceClient import DeviceClient -from webui.service.device.forms import AddDeviceForm -from common.DeviceTypes import DeviceTypeEnum -from webui.service.device.forms import ConfigForm -from webui.service.device.forms import UpdateDeviceForm +from webui.service.device.forms import AddDeviceForm, ConfigForm, UpdateDeviceForm device = Blueprint('device', __name__, url_prefix='/device') context_client = ContextClient() @@ -39,17 +37,19 @@ def home(): topology_uuid = session['topology_uuid'] context_client.connect() - json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) - grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) - topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids} - grpc_devices: DeviceList = context_client.ListDevices(Empty()) + grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False) + if grpc_topology is None: + flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger') + devices = [] + else: + topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids} + grpc_devices: DeviceList = context_client.ListDevices(Empty()) + devices = [ + device for device in grpc_devices.devices + if device.device_id.device_uuid.uuid in topo_device_uuids + ] context_client.close() - devices = [ - device for device in grpc_devices.devices - if device.device_id.device_uuid.uuid in topo_device_uuids - ] - return render_template( 'device/home.html', devices=devices, dde=DeviceDriverEnum, dose=DeviceOperationalStatusEnum) @@ -71,23 +71,23 @@ def add(): if form.validate_on_submit(): device_obj = Device() # Device UUID: - device_obj.device_id.device_uuid.uuid = form.device_id.data + device_obj.device_id.device_uuid.uuid = form.device_id.data # pylint: disable=no-member # Device type: device_obj.device_type = str(form.device_type.data) # Device configurations: - config_rule = device_obj.device_config.config_rules.add() + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = '_connect/address' config_rule.custom.resource_value = form.device_config_address.data - config_rule = device_obj.device_config.config_rules.add() + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = '_connect/port' config_rule.custom.resource_value = form.device_config_port.data - config_rule = device_obj.device_config.config_rules.add() + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = '_connect/settings' @@ -105,20 +105,22 @@ def add(): device_obj.device_operational_status = form.operational_status.data # Device drivers: + device_drivers = list() if form.device_drivers_undefined.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_UNDEFINED) if form.device_drivers_openconfig.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG) if form.device_drivers_transport_api.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API) + 
device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API) if form.device_drivers_p4.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4) if form.device_drivers_ietf_network_topology.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY) if form.device_drivers_onf_tr_352.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352) if form.device_drivers_xr.data: - device_obj.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR) + device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_XR) + device_obj.device_drivers.extend(device_drivers) # pylint: disable=no-member try: device_client.connect() @@ -126,7 +128,7 @@ def add(): device_client.close() flash(f'New device was created with ID "{response.device_uuid.uuid}".', 'success') return redirect(url_for('device.home')) - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem adding the device. {e.details()}', 'danger') return render_template('device/add.html', form=form, @@ -134,14 +136,15 @@ def add(): @device.route('detail/', methods=['GET', 'POST']) def detail(device_uuid: str): - request = DeviceId() - request.device_uuid.uuid = device_uuid context_client.connect() - response = context_client.GetDevice(request) + device_obj = get_device(context_client, device_uuid, rw_copy=False) + if device_obj is None: + flash('Device({:s}) not found'.format(str(device_uuid)), 'danger') + device_obj = Device() context_client.close() - return render_template('device/detail.html', device=response, - dde=DeviceDriverEnum, - dose=DeviceOperationalStatusEnum) + + return render_template( + 'device/detail.html', device=device_obj, dde=DeviceDriverEnum, dose=DeviceOperationalStatusEnum) @device.get('/delete') def delete(device_uuid): @@ -154,13 +157,13 @@ def delete(device_uuid): # TODO: finalize implementation request = DeviceId() - request.device_uuid.uuid = device_uuid + request.device_uuid.uuid = device_uuid # pylint: disable=no-member device_client.connect() - response = device_client.DeleteDevice(request) + device_client.DeleteDevice(request) device_client.close() flash(f'Device "{device_uuid}" deleted successfully!', 'success') - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem deleting device "{device_uuid}": {e.details()}', 'danger') current_app.logger.exception(e) return redirect(url_for('device.home')) @@ -169,25 +172,25 @@ def delete(device_uuid): def addconfig(device_uuid): form = ConfigForm() request = DeviceId() - request.device_uuid.uuid = device_uuid + request.device_uuid.uuid = device_uuid # pylint: disable=no-member context_client.connect() response = context_client.GetDevice(request) context_client.close() if form.validate_on_submit(): - device = Device() - device.CopyFrom(response) - config_rule = device.device_config.config_rules.add() + device_obj = Device() + device_obj.CopyFrom(response) + config_rule = device_obj.device_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.custom.resource_key = form.device_key_config.data config_rule.custom.resource_value = form.device_value_config.data try: device_client.connect() - response: DeviceId = device_client.ConfigureDevice(device) + response: DeviceId = 
device_client.ConfigureDevice(device_obj) device_client.close() flash(f'New configuration was created with ID "{response.device_uuid.uuid}".', 'success') return redirect(url_for('device.home')) - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem adding the device. {e.details()}', 'danger') return render_template('device/addconfig.html', form=form, submit_text='Add New Configuration') @@ -203,28 +206,29 @@ def updateconfig(): def update(device_uuid): form = UpdateDeviceForm() request = DeviceId() - request.device_uuid.uuid = device_uuid + request.device_uuid.uuid = device_uuid # pylint: disable=no-member context_client.connect() response = context_client.GetDevice(request) context_client.close() # listing enum values form.update_operational_status.choices = [] - for key, value in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items(): - form.update_operational_status.choices.append((DeviceOperationalStatusEnum.Value(key), key.replace('DEVICEOPERATIONALSTATUS_', ''))) + for key, _ in DeviceOperationalStatusEnum.DESCRIPTOR.values_by_name.items(): + item = (DeviceOperationalStatusEnum.Value(key), key.replace('DEVICEOPERATIONALSTATUS_', '')) + form.update_operational_status.choices.append(item) form.update_operational_status.default = response.device_operational_status if form.validate_on_submit(): - device = Device() - device.CopyFrom(response) - device.device_operational_status = form.update_operational_status.data + device_obj = Device() + device_obj.CopyFrom(response) + device_obj.device_operational_status = form.update_operational_status.data try: device_client.connect() - response: DeviceId = device_client.ConfigureDevice(device) + response: DeviceId = device_client.ConfigureDevice(device_obj) device_client.close() flash(f'Status of device with ID "{response.device_uuid.uuid}" was updated.', 'success') return redirect(url_for('device.home')) - except Exception as e: + except Exception as e: # pylint: disable=broad-except flash(f'Problem updating the device. 
{e.details()}', 'danger') return render_template('device/update.html', device=response, form=form, submit_text='Update Device') diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py index 9324ad0be..0fda8958e 100644 --- a/src/webui/service/link/routes.py +++ b/src/webui/service/link/routes.py @@ -14,10 +14,10 @@ from flask import render_template, Blueprint, flash, session, redirect, url_for -from common.proto.context_pb2 import Empty, LinkId, LinkList, TopologyId +from common.proto.context_pb2 import Empty, Link, LinkList from common.tools.context_queries.EndPoint import get_endpoint_names -from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Topology import json_topology_id +from common.tools.context_queries.Link import get_link +from common.tools.context_queries.Topology import get_topology from context.client.ContextClient import ContextClient @@ -33,20 +33,21 @@ def home(): context_uuid = session['context_uuid'] topology_uuid = session['topology_uuid'] + links, endpoint_ids = list(), list() + device_names, endpoints_data = dict(), dict() + context_client.connect() - json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) - grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) - topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids} - grpc_links: LinkList = context_client.ListLinks(Empty()) - - endpoint_ids = [] - links = [] - for link_ in grpc_links.links: - if link_.link_id.link_uuid.uuid not in topo_link_uuids: continue - links.append(link_) - endpoint_ids.extend(link_.link_endpoint_ids) - - device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) + grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False) + if grpc_topology is None: + flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger') + else: + topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids} + grpc_links: LinkList = context_client.ListLinks(Empty()) + for link_ in grpc_links.links: + if link_.link_id.link_uuid.uuid not in topo_link_uuids: continue + links.append(link_) + endpoint_ids.extend(link_.link_endpoint_ids) + device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) context_client.close() return render_template('link/home.html', links=links, device_names=device_names, endpoints_data=endpoints_data) @@ -54,10 +55,13 @@ def home(): @link.route('detail/', methods=('GET', 'POST')) def detail(link_uuid: str): - request = LinkId() - request.link_uuid.uuid = link_uuid # pylint: disable=no-member context_client.connect() - response = context_client.GetLink(request) - device_names, endpoints_data = get_endpoint_names(context_client, response.link_endpoint_ids) + link_obj = get_link(context_client, link_uuid, rw_copy=False) + if link_obj is None: + flash('Link({:s}) not found'.format(str(link_uuid)), 'danger') + link_obj = Link() + device_names, endpoints_data = dict(), dict() + else: + device_names, endpoints_data = get_endpoint_names(context_client, link_obj.link_endpoint_ids) context_client.close() - return render_template('link/detail.html',link=response, device_names=device_names, endpoints_data=endpoints_data) + return render_template('link/detail.html',link=link_obj, device_names=device_names, endpoints_data=endpoints_data) diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py 
index ee9b092ae..defbe2cb0 100644 --- a/src/webui/service/service/routes.py +++ b/src/webui/service/service/routes.py @@ -14,8 +14,11 @@ import grpc from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for -from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection +from common.proto.context_pb2 import ( + IsolationLevelEnum, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection) +from common.tools.context_queries.Context import get_context from common.tools.context_queries.EndPoint import get_endpoint_names +from common.tools.context_queries.Service import get_service from context.client.ContextClient import ContextClient from service.client.ServiceClient import ServiceClient @@ -26,93 +29,94 @@ service_client = ServiceClient() @service.get('/') def home(): - # flash('This is an info message', 'info') - # flash('This is a danger message', 'danger') - - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_uuid' not in session or 'topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) - request = ContextId() - request.context_uuid.uuid = context_uuid + context_uuid = session['context_uuid'] + context_client.connect() - try: - service_list = context_client.ListServices(request) - # print(service_list) - services = service_list.services - context_found = True - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.NOT_FOUND: raise - if e.details() != 'Context({:s}) not found'.format(context_uuid): raise - services = [] - context_found = False - - if context_found: - endpoint_ids = [] - for service_ in services: - endpoint_ids.extend(service_.service_endpoint_ids) - device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) + + context_obj = get_context(context_client, context_uuid, rw_copy=False) + if context_obj is None: + flash('Context({:s}) not found'.format(str(context_uuid)), 'danger') + services, device_names, endpoints_data = list(), list(), list() else: - device_names, endpoints_data = [],[] + try: + services = context_client.ListServices(context_obj.context_id) + services = services.services + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.NOT_FOUND: raise + if e.details() != 'Context({:s}) not found'.format(context_uuid): raise + services, device_names, endpoints_data = list(), dict(), dict() + else: + endpoint_ids = list() + for service_ in services: + endpoint_ids.extend(service_.service_endpoint_ids) + device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) context_client.close() return render_template( 'service/home.html', services=services, device_names=device_names, endpoints_data=endpoints_data, - context_not_found=not context_found, ste=ServiceTypeEnum, sse=ServiceStatusEnum) + ste=ServiceTypeEnum, sse=ServiceStatusEnum) @service.route('add', methods=['GET', 'POST']) def add(): flash('Add service route called', 'danger') raise NotImplementedError() - return render_template('service/home.html') + #return render_template('service/home.html') @service.get('/detail') def detail(service_uuid: str): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_uuid' not in session or 'topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) - - request: ServiceId = ServiceId() - request.service_uuid.uuid = service_uuid - 
request.context_id.context_uuid.uuid = context_uuid + context_uuid = session['context_uuid'] + try: context_client.connect() - response: Service = context_client.GetService(request) - connections: Connection = context_client.ListConnections(request) - connections = connections.connections - endpoint_ids = [] - endpoint_ids.extend(response.service_endpoint_ids) - for connection in connections: - endpoint_ids.extend(connection.path_hops_endpoint_ids) - device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) + endpoint_ids = list() + service_obj = get_service(context_client, service_uuid, rw_copy=False) + if service_obj is None: + flash('Context({:s})/Service({:s}) not found'.format(str(context_uuid), str(service_uuid)), 'danger') + service_obj = Service() + else: + endpoint_ids.extend(service_obj.service_endpoint_ids) + connections: Connection = context_client.ListConnections(service_obj.service_id) + connections = connections.connections + for connection in connections: endpoint_ids.extend(connection.path_hops_endpoint_ids) + + if len(endpoint_ids) > 0: + device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) + else: + device_names, endpoints_data = dict(), dict() context_client.close() + + return render_template( + 'service/detail.html', service=service_obj, connections=connections, device_names=device_names, + endpoints_data=endpoints_data, ste=ServiceTypeEnum, sse=ServiceStatusEnum, ile=IsolationLevelEnum) except Exception as e: flash('The system encountered an error and cannot show the details of this service.', 'warning') current_app.logger.exception(e) return redirect(url_for('service.home')) - return render_template( - 'service/detail.html', service=response, connections=connections, device_names=device_names, - endpoints_data=endpoints_data, ste=ServiceTypeEnum, sse=ServiceStatusEnum) @service.get('/delete') def delete(service_uuid: str): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_uuid' not in session or 'topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) + context_uuid = session['context_uuid'] try: request = ServiceId() request.service_uuid.uuid = service_uuid request.context_id.context_uuid.uuid = context_uuid service_client.connect() - response = service_client.DeleteService(request) + service_client.DeleteService(request) service_client.close() flash('Service "{:s}" deleted successfully!'.format(service_uuid), 'success') diff --git a/src/webui/service/slice/routes.py b/src/webui/service/slice/routes.py index 222508418..cd1b672d5 100644 --- a/src/webui/service/slice/routes.py +++ b/src/webui/service/slice/routes.py @@ -11,11 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# + import grpc from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for -from common.proto.context_pb2 import ContextId, Slice, SliceId, SliceStatusEnum +from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum +from common.tools.context_queries.Context import get_context from common.tools.context_queries.EndPoint import get_endpoint_names +from common.tools.context_queries.Slice import get_slice from context.client.ContextClient import ContextClient from slice.client.SliceClient import SliceClient @@ -26,92 +28,88 @@ slice_client = SliceClient() @slice.get('/') def home(): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_uuid' not in session or 'topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) - request = ContextId() - request.context_uuid.uuid = context_uuid + context_uuid = session['context_uuid'] + context_client.connect() - try: - slice_list = context_client.ListSlices(request) - slices = slice_list.slices - context_found = True - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.NOT_FOUND: raise - if e.details() != 'Context({:s}) not found'.format(context_uuid): raise - slices = [] - context_found = False - - if context_found: - endpoint_ids = [] - for slice_ in slices: - endpoint_ids.extend(slice_.slice_endpoint_ids) - device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) + + context_obj = get_context(context_client, context_uuid, rw_copy=False) + if context_obj is None: + flash('Context({:s}) not found'.format(str(context_uuid)), 'danger') + device_names, endpoints_data = list(), list() else: - device_names, endpoints_data = [],[] + try: + slices = context_client.ListSlices(context_obj.context_id) + slices = slices.slices + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.NOT_FOUND: raise + if e.details() != 'Context({:s}) not found'.format(context_uuid): raise + slices, device_names, endpoints_data = list(), dict(), dict() + else: + endpoint_ids = list() + for slice_ in slices: + endpoint_ids.extend(slice_.slice_endpoint_ids) + device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) context_client.close() - return render_template( 'slice/home.html', slices=slices, device_names=device_names, endpoints_data=endpoints_data, - context_not_found=not context_found, sse=SliceStatusEnum) + sse=SliceStatusEnum) @slice.route('add', methods=['GET', 'POST']) def add(): flash('Add slice route called', 'danger') raise NotImplementedError() - return render_template('slice/home.html') + #return render_template('slice/home.html') @slice.get('/detail') def detail(slice_uuid: str): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_uuid' not in session or 'topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) - - request: SliceId = SliceId() - request.slice_uuid.uuid = slice_uuid - request.context_id.context_uuid.uuid = context_uuid - req = ContextId() - req.context_uuid.uuid = context_uuid + context_uuid = session['context_uuid'] + try: context_client.connect() - response: Slice = context_client.GetSlice(request) - services = context_client.ListServices(req) - endpoint_ids = [] - endpoint_ids.extend(response.slice_endpoint_ids) - device_names, endpoints_data = get_endpoint_names(context_client, endpoint_ids) + slice_obj = get_slice(context_client, slice_uuid, 
rw_copy=False) + if slice_obj is None: + flash('Context({:s})/Slice({:s}) not found'.format(str(context_uuid), str(slice_uuid)), 'danger') + slice_obj = Slice() + else: + device_names, endpoints_data = get_endpoint_names(context_client, slice_obj.slice_endpoint_ids) context_client.close() + + return render_template( + 'slice/detail.html', slice=slice_obj, device_names=device_names, endpoints_data=endpoints_data, + sse=SliceStatusEnum, ile=IsolationLevelEnum) except Exception as e: flash('The system encountered an error and cannot show the details of this slice.', 'warning') current_app.logger.exception(e) return redirect(url_for('slice.home')) - return render_template( - 'slice/detail.html', slice=response, device_names=device_names, endpoints_data=endpoints_data, - sse=SliceStatusEnum, services=services) - -#@slice.get('/delete') -#def delete(slice_uuid: str): -# context_uuid = session.get('context_uuid', '-') -# if context_uuid == "-": -# flash("Please select a context!", "warning") -# return redirect(url_for("main.home")) -# -# try: -# request = SliceId() -# request.slice_uuid.uuid = slice_uuid -# request.context_id.context_uuid.uuid = context_uuid -# slice_client.connect() -# response = slice_client.DeleteSlice(request) -# slice_client.close() -# -# flash('Slice "{:s}" deleted successfully!'.format(slice_uuid), 'success') -# except Exception as e: -# flash('Problem deleting slice "{:s}": {:s}'.format(slice_uuid, str(e.details())), 'danger') -# current_app.logger.exception(e) -# return redirect(url_for('slice.home')) + +@slice.get('/delete') +def delete(slice_uuid: str): + if 'context_uuid' not in session or 'topology_uuid' not in session: + flash("Please select a context!", "warning") + return redirect(url_for("main.home")) + context_uuid = session['context_uuid'] + + try: + request = SliceId() + request.slice_uuid.uuid = slice_uuid + request.context_id.context_uuid.uuid = context_uuid + slice_client.connect() + slice_client.DeleteSlice(request) + slice_client.close() + + flash('Slice "{:s}" deleted successfully!'.format(slice_uuid), 'success') + except Exception as e: + flash('Problem deleting slice "{:s}": {:s}'.format(slice_uuid, str(e.details())), 'danger') + current_app.logger.exception(e) + return redirect(url_for('slice.home')) diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html index 0aa022f14..35999ebe1 100644 --- a/src/webui/service/templates/base.html +++ b/src/webui/service/templates/base.html @@ -103,7 +103,7 @@ - Current Context({{ get_working_context() }})/Topology({{ get_working_topology() }}) + Selected Context({{ get_working_context() }})/Topology({{ get_working_topology() }}) diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index de8bb4a81..1b4b43f5a 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -29,13 +29,14 @@
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index b21606951..d99ede3e0 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -36,7 +36,8 @@
@@ -87,7 +88,7 @@ Kind - Type + Key/Type Value @@ -135,15 +136,43 @@ {{ constraint.endpoint_priority.priority }} + {% elif constraint.WhichOneof('constraint')=='sla_capacity' %} + + SLA Capacity + - + + {{ constraint.sla_capacity.capacity_gbps }} Gbps + + + {% elif constraint.WhichOneof('constraint')=='sla_latency' %} + + SLA E2E Latency + - + + {{ constraint.sla_latency.e2e_latency_ms }} ms + + {% elif constraint.WhichOneof('constraint')=='sla_availability' %} SLA Availability - + {{ constraint.sla_availability.availability }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active + {% elif constraint.WhichOneof('constraint')=='sla_isolation' %} + + SLA Isolation + - + + {% for i,isolation_level in enumerate(constraint.sla_isolation.isolation_level) %} + {% if i > 0 %}, {% endif %} + {{ ile.Name(isolation_level) }} + {% endfor %} + + {% else %} - @@ -185,34 +214,12 @@ {% endfor %} - - - - + @@ -258,8 +265,26 @@
Connection IdSub-serviceSub-Service Path
+ + - - - -{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 390f882d7..6c8d15aed 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -32,14 +32,14 @@ Update - -
--> +
--> +
-
@@ -88,7 +88,7 @@ Kind - Type + Key/Type Value @@ -136,15 +136,43 @@ {{ constraint.endpoint_priority.priority }} + {% elif constraint.WhichOneof('constraint')=='sla_capacity' %} + + SLA Capacity + - + + {{ constraint.sla_capacity.capacity_gbps }} Gbps + + + {% elif constraint.WhichOneof('constraint')=='sla_latency' %} + + SLA E2E Latency + - + + {{ constraint.sla_latency.e2e_latency_ms }} ms + + {% elif constraint.WhichOneof('constraint')=='sla_availability' %} SLA Availability - + {{ constraint.sla_availability.availability }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active + {% elif constraint.WhichOneof('constraint')=='sla_isolation' %} + + SLA Isolation + - + + {% for i,isolation_level in enumerate(constraint.sla_isolation.isolation_level) %} + {% if i > 0 %}, {% endif %} + {{ ile.Name(isolation_level) }} + {% endfor %} + + {% else %} - @@ -191,7 +219,7 @@ - + @@ -219,7 +247,7 @@
Service IdSub-Services
- + @@ -244,4 +272,27 @@
Sub-slicesSub-Slices
-{% endblock %}
\ No newline at end of file
+
+
+
+{% endblock %}
-- 
GitLab


From 0beaad33b6820da381c0d40c776897e05f72f5b9 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Fri, 17 Feb 2023 10:08:57 +0000
Subject: [PATCH 07/43] Deploy script: - added check ignore-not-exists in
 delete namespace

---
 deploy/tfs.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index b9bcbab4d..1f62adcd5 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -85,7 +85,7 @@ TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
 mkdir -p $TMP_LOGS_FOLDER
 
 echo "Deleting and Creating a new namespace..."
-kubectl delete namespace $TFS_K8S_NAMESPACE
+kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
 kubectl create namespace $TFS_K8S_NAMESPACE
 printf "\n"
-- 
GitLab


From 198096862eb4c33b16ae228258a54eaca9f4a775 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Fri, 17 Feb 2023 10:15:17 +0000
Subject: [PATCH 08/43] Context component: - corrected management of Isolation
 Level SLA Constraints

---
 src/context/service/database/Constraint.py         |  2 +-
 .../service/database/models/ConstraintModel.py     | 18 ++++++++++--------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py
index 0540841c3..768108d9b 100644
--- a/src/context/service/database/Constraint.py
+++ b/src/context/service/database/Constraint.py
@@ -66,7 +66,7 @@ def compose_constraints_data(
             constraint_name = '{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid)
         elif kind in {
             ConstraintKindEnum.SCHEDULE, ConstraintKindEnum.SLA_CAPACITY, ConstraintKindEnum.SLA_LATENCY,
-            ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION_LEVEL
+            ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION
         }:
             constraint_name = '{:s}:{:s}:'.format(parent_kind, kind.value)
         else:
diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py
index 01c7bcb76..e9660d502 100644
--- a/src/context/service/database/models/ConstraintModel.py
+++ b/src/context/service/database/models/ConstraintModel.py
@@ -19,15 +19,17 @@ from typing import Dict
 from ._Base import _Base
 
 # Enum values should match name of field in Constraint message
+# - enum item name should be Constraint message type in upper case
+# - enum item value should be Constraint message type as it is in the proto files
 class ConstraintKindEnum(enum.Enum):
-    CUSTOM              = 'custom'
-    SCHEDULE            = 'schedule'
-    ENDPOINT_LOCATION   = 'endpoint_location'
-    ENDPOINT_PRIORITY   = 'endpoint_priority'
-    SLA_CAPACITY        = 'sla_capacity'
-    SLA_LATENCY         = 'sla_latency'
-    SLA_AVAILABILITY    = 'sla_availability'
-    SLA_ISOLATION_LEVEL = 'sla_isolation'
+    CUSTOM            = 'custom'
+    SCHEDULE          = 'schedule'
+    ENDPOINT_LOCATION = 'endpoint_location'
+    ENDPOINT_PRIORITY = 'endpoint_priority'
+    SLA_CAPACITY      = 'sla_capacity'
+    SLA_LATENCY       = 'sla_latency'
+    SLA_AVAILABILITY  = 'sla_availability'
+    SLA_ISOLATION     = 'sla_isolation'
 
 class ConstraintModel(_Base):
     __tablename__ = 'constraint'
-- 
GitLab


From ddf424f1985cb526fa9710a5916a4d6df0de33d0 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Fri, 17 Feb 2023 10:18:06 +0000
Subject: [PATCH 09/43] WebUI component: - partial revert of change in templates

---
 src/webui/service/templates/service/detail.html | 1 -
 src/webui/service/templates/slice/detail.html   | 1 -
 2 files changed, 2 deletions(-)

diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index
d99ede3e0..b267f986c 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -157,7 +157,6 @@ SLA Availability - - {{ constraint.sla_availability.availability }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 6c8d15aed..2c1b55afb 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -157,7 +157,6 @@ SLA Availability - - {{ constraint.sla_availability.availability }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active -- GitLab From d7961f7572fb6c39fdb40eff1a11c1908a544475 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:33:03 +0000 Subject: [PATCH 10/43] Common - Descriptor Loader tool: - added support for dictionary and file-based loading of descriptors - added getter methods - added validation and unload methods - integrated helper methods - updated documentation --- src/common/tests/LoadScenario.py | 50 -------- src/common/tools/descriptor/Loader.py | 157 ++++++++++++++++++++++---- 2 files changed, 133 insertions(+), 74 deletions(-) delete mode 100644 src/common/tests/LoadScenario.py diff --git a/src/common/tests/LoadScenario.py b/src/common/tests/LoadScenario.py deleted file mode 100644 index 93cf3708c..000000000 --- a/src/common/tests/LoadScenario.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from service.client.ServiceClient import ServiceClient -from slice.client.SliceClient import SliceClient - -LOGGER = logging.getLogger(__name__) -LOGGERS = { - 'success': LOGGER.info, - 'danger' : LOGGER.error, - 'error' : LOGGER.error, -} - -def load_scenario_from_descriptor( - descriptor_file : str, context_client : ContextClient, device_client : DeviceClient, - service_client : ServiceClient, slice_client : SliceClient -) -> DescriptorLoader: - with open(descriptor_file, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader( - descriptors, - context_client=context_client, device_client=device_client, - service_client=service_client, slice_client=slice_client) - results = descriptor_loader.process() - - num_errors = 0 - for message,level in compose_notifications(results): - LOGGERS.get(level)(message) - if level != 'success': num_errors += 1 - if num_errors > 0: - MSG = 'Failed to load descriptors in file {:s}' - raise Exception(MSG.format(str(descriptor_file))) - - return descriptor_loader \ No newline at end of file diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py index 5972d425b..0e1d8c737 100644 --- a/src/common/tools/descriptor/Loader.py +++ b/src/common/tools/descriptor/Loader.py @@ -15,25 +15,30 @@ # SDN controller descriptor loader # Usage example (WebUI): -# descriptors = json.loads(descriptors_data_from_client) +# descriptors = json.loads( +# descriptors=descriptors_data_from_client, num_workers=10, +# context_client=..., device_client=..., service_client=..., slice_client=...) # descriptor_loader = DescriptorLoader(descriptors) # results = descriptor_loader.process() # for message,level in compose_notifications(results): # flash(message, level) # Usage example (pytest): -# with open('path/to/descriptor.json', 'r', encoding='UTF-8') as f: -# descriptors = json.loads(f.read()) # descriptor_loader = DescriptorLoader( -# descriptors, context_client=..., device_client=..., service_client=..., slice_client=...) +# descriptors_file='path/to/descriptor.json', num_workers=10, +# context_client=..., device_client=..., service_client=..., slice_client=...) # results = descriptor_loader.process() -# loggers = {'success': LOGGER.info, 'danger': LOGGER.error, 'error': LOGGER.error} -# for message,level in compose_notifications(results): -# loggers.get(level)(message) +# check_results(results, descriptor_loader) +# descriptor_loader.validate() +# # do test ... 
+# descriptor_loader.unload() import concurrent.futures, json, logging, operator from typing import Any, Dict, List, Optional, Tuple, Union -from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology +from common.proto.context_pb2 import ( + Connection, Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Service, ServiceId, Slice, SliceId, + Topology, TopologyId) +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient @@ -44,6 +49,11 @@ from .Tools import ( get_descriptors_add_topologies, split_devices_by_rules) LOGGER = logging.getLogger(__name__) +LOGGERS = { + 'success': LOGGER.info, + 'danger' : LOGGER.error, + 'error' : LOGGER.error, +} ENTITY_TO_TEXT = { # name => singular, plural @@ -67,25 +77,26 @@ TypeResults = List[Tuple[str, str, int, List[str]]] # entity_name, action, num_o TypeNotification = Tuple[str, str] # message, level TypeNotificationList = List[TypeNotification] -def compose_notifications(results : TypeResults) -> TypeNotificationList: - notifications = [] - for entity_name, action_name, num_ok, error_list in results: - entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name] - action_infinitive, action_past = ACTION_TO_TEXT[action_name] - num_err = len(error_list) - for error in error_list: - notifications.append((f'Unable to {action_infinitive} {entity_name_singluar} {error}', 'error')) - if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success')) - if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger')) - return notifications - class DescriptorLoader: def __init__( - self, descriptors : Union[str, Dict], num_workers : int = 1, + self, descriptors : Optional[Union[str, Dict]] = None, descriptors_file : Optional[str] = None, + num_workers : int = 1, context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None, service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None ) -> None: - self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors + if (descriptors is None) == (descriptors_file is None): + raise Exception('Exactly one of "descriptors" or "descriptors_file" is required') + + if descriptors_file is not None: + with open(descriptors_file, 'r', encoding='UTF-8') as f: + self.__descriptors = json.loads(f.read()) + self.__descriptor_file_path = descriptors_file + else: # descriptors is not None + self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors + self.__descriptor_file_path = '' + + self.__num_workers = num_workers + self.__dummy_mode = self.__descriptors.get('dummy_mode' , False) self.__contexts = self.__descriptors.get('contexts' , []) self.__topologies = self.__descriptors.get('topologies' , []) @@ -95,8 +106,6 @@ class DescriptorLoader: self.__slices = self.__descriptors.get('slices' , []) self.__connections = self.__descriptors.get('connections', []) - self.__num_workers = num_workers - self.__contexts_add = None self.__topologies_add = None self.__devices_add = None @@ -111,6 +120,24 @@ class DescriptorLoader: self.__results : TypeResults = list() + @property + def descriptor_file_path(self) -> Optional[str]: return self.__descriptor_file_path + + @property + def num_workers(self) -> int: return self.__num_workers + + 
@property + def context_client(self) -> Optional[ContextClient]: return self.__ctx_cli + + @property + def device_client(self) -> Optional[DeviceClient]: return self.__dev_cli + + @property + def service_client(self) -> Optional[ServiceClient]: return self.__svc_cli + + @property + def slice_client(self) -> Optional[SliceClient]: return self.__slc_cli + @property def contexts(self) -> List[Dict]: return self.__contexts @@ -269,3 +296,85 @@ class DescriptorLoader: error_list = [str_error for _,str_error in sorted(error_list, key=operator.itemgetter(0))] self.__results.append((entity_name, action_name, num_ok, error_list)) + + def validate(self) -> None: + self.__ctx_cli.connect() + + contexts = self.__ctx_cli.ListContexts(Empty()) + assert len(contexts.contexts) == self.num_contexts + + for context_uuid, num_topologies in self.num_topologies.items(): + response = self.__ctx_cli.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies + + response = self.__ctx_cli.ListDevices(Empty()) + assert len(response.devices) == self.num_devices + + response = self.__ctx_cli.ListLinks(Empty()) + assert len(response.links) == self.num_links + + for context_uuid, num_services in self.num_services.items(): + response = self.__ctx_cli.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == num_services + + for context_uuid, num_slices in self.num_slices.items(): + response = self.__ctx_cli.ListSlices(ContextId(**json_context_id(context_uuid))) + assert len(response.slices) == num_slices + + def unload(self) -> None: + self.__ctx_cli.connect() + self.__dev_cli.connect() + self.__svc_cli.connect() + self.__slc_cli.connect() + + for _, slice_list in self.slices.items(): + for slice_ in slice_list: + self.__slc_cli.DeleteSlice(SliceId(**slice_['slice_id'])) + + for _, service_list in self.services.items(): + for service in service_list: + self.__svc_cli.DeleteService(ServiceId(**service['service_id'])) + + for link in self.links: + self.__ctx_cli.RemoveLink(LinkId(**link['link_id'])) + + for device in self.devices: + self.__dev_cli.DeleteDevice(DeviceId(**device['device_id'])) + + for _, topology_list in self.topologies.items(): + for topology in topology_list: + self.__ctx_cli.RemoveTopology(TopologyId(**topology['topology_id'])) + + for context in self.contexts: + self.__ctx_cli.RemoveContext(ContextId(**context['context_id'])) + +def compose_notifications(results : TypeResults) -> TypeNotificationList: + notifications = [] + for entity_name, action_name, num_ok, error_list in results: + entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name] + action_infinitive, action_past = ACTION_TO_TEXT[action_name] + num_err = len(error_list) + for error in error_list: + notifications.append((f'Unable to {action_infinitive} {entity_name_singluar} {error}', 'error')) + if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success')) + if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger')) + return notifications + +def check_descriptor_load_results(results : TypeResults, descriptor_loader : DescriptorLoader) -> None: + num_errors = 0 + for message,level in compose_notifications(results): + LOGGERS.get(level)(message) + if level != 'success': num_errors += 1 + if num_errors > 0: + MSG = 'Failed to load descriptors from "{:s}"' + raise Exception(MSG.format(str(descriptor_loader.descriptor_file_path))) + +def validate_empty_scenario(context_client : 
ContextClient) -> None: + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 -- GitLab From 57bfda9d2608fc875fd1fc24761ef081d8314d42 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:33:38 +0000 Subject: [PATCH 11/43] Tools - Load Scenario: - updated according to new common load scenario tools --- src/tests/tools/load_scenario/__main__.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/tests/tools/load_scenario/__main__.py b/src/tests/tools/load_scenario/__main__.py index 3559f778d..df1d5d8bf 100644 --- a/src/tests/tools/load_scenario/__main__.py +++ b/src/tests/tools/load_scenario/__main__.py @@ -13,7 +13,7 @@ # limitations under the License. import logging, sys -from common.tests.LoadScenario import load_scenario_from_descriptor +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient @@ -29,7 +29,12 @@ def main(): slice_client = SliceClient() LOGGER.info('Loading scenario...') - load_scenario_from_descriptor(sys.argv[1], context_client, device_client, service_client, slice_client) + descriptor_loader = DescriptorLoader( + descriptors_file=sys.argv[1], context_client=context_client, device_client=device_client, + service_client=service_client, slice_client=slice_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() LOGGER.info('Done!') return 0 -- GitLab From 0066c66f09f7d50795eef2efb03deb0ca039136b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:35:27 +0000 Subject: [PATCH 12/43] Tests: - updated tests according to corrected load scenario tool - updated benchmark/policy/tests - updated ecoc22/tests - updated ofc22/tests --- .../policy/tests/test_functional_bootstrap.py | 45 +++------ .../policy/tests/test_functional_cleanup.py | 68 ++++---------- .../tests/test_functional_create_service.py | 92 +++++++------------ .../tests/test_functional_delete_service.py | 91 +++++++----------- .../ecoc22/tests/test_functional_bootstrap.py | 48 ++-------- .../ecoc22/tests/test_functional_cleanup.py | 66 ++----------- .../tests/test_functional_create_service.py | 54 ++--------- .../tests/test_functional_delete_service.py | 73 ++++----------- .../ofc22/tests/test_functional_bootstrap.py | 46 ++-------- .../ofc22/tests/test_functional_cleanup.py | 66 ++----------- .../tests/test_functional_create_service.py | 57 +++--------- .../tests/test_functional_delete_service.py | 71 ++++---------- 12 files changed, 190 insertions(+), 587 deletions(-) diff --git a/src/tests/benchmark/policy/tests/test_functional_bootstrap.py b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py index 65c46b4eb..ca1882aaa 100644 --- a/src/tests/benchmark/policy/tests/test_functional_bootstrap.py +++ b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py @@ -13,10 +13,10 @@ # limitations under the License. 
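# For reference, the full lifecycle offered by the DescriptorLoader API adopted in
# the tests below is sketched here, comment-style, following the usage-example
# convention of the Loader.py header. This is a minimal sketch: it assumes a
# deployed Context/Device component, and the descriptor file path is illustrative.
#   from common.tools.descriptor.Loader import (
#       DescriptorLoader, check_descriptor_load_results, validate_empty_scenario)
#   from context.client.ContextClient import ContextClient
#   from device.client.DeviceClient import DeviceClient
#   context_client = ContextClient()
#   device_client = DeviceClient()
#   validate_empty_scenario(context_client)     # nothing loaded yet
#   descriptor_loader = DescriptorLoader(
#       descriptors_file='examples/descriptors_emulated.json',   # illustrative path
#       context_client=context_client, device_client=device_client)
#   results = descriptor_loader.process()       # create contexts/topologies/devices/links
#   check_descriptor_load_results(results, descriptor_loader)    # raise on any failure
#   descriptor_loader.validate()                # entity counts match the descriptors
#   descriptor_loader.unload()                  # tear down in reverse dependency order
#   validate_empty_scenario(context_client)     # database is empty again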
import logging, time +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, Empty from common.proto.monitoring_pb2 import KpiDescriptorList -from common.tests.LoadScenario import load_scenario_from_descriptor -from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -27,44 +27,25 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_scenario_bootstrap( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure database is empty ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 + validate_empty_scenario(context_client) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - - # ----- Load Scenario ---------------------------------------------------------------------------------------------- - descriptor_loader = load_scenario_from_descriptor( - DESCRIPTOR_FILE, context_client, device_client, None, None) - - - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 def test_scenario_kpis_created( context_client : ContextClient, # pylint: disable=redefined-outer-name diff --git a/src/tests/benchmark/policy/tests/test_functional_cleanup.py b/src/tests/benchmark/policy/tests/test_functional_cleanup.py index e00c5ceee..122526840 100644 --- a/src/tests/benchmark/policy/tests/test_functional_cleanup.py +++ b/src/tests/benchmark/policy/tests/test_functional_cleanup.py @@ -13,9 +13,10 @@ # limitations under the License. 
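# Cleanup note: DescriptorLoader.unload(), relied upon below, deletes entities in
# reverse dependency order (slices, then services, links, devices, topologies and,
# last, contexts). A manual equivalent of the link/device part would look roughly
# like the following sketch (LinkId/DeviceId are the gRPC wrappers unload() itself
# uses):
#   for link in descriptor_loader.links:
#       context_client.RemoveLink(LinkId(**link['link_id']))
#   for device in descriptor_loader.devices:
#       device_client.DeleteDevice(DeviceId(**device['device_id']))
# The tests now delegate that sequencing to unload() and simply assert the final
# state with validate_empty_scenario().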
import logging -from common.tools.descriptor.Loader import DescriptorLoader +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario from common.tools.object_factory.Context import json_context_id -from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from tests.Fixtures import context_client, device_client # pylint: disable=unused-import @@ -24,57 +25,20 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) - -def test_services_removed( +def test_scenario_cleanup( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - - # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- - for link in descriptor_loader.links: - context_client.RemoveLink(LinkId(**link['link_id'])) - - for device in descriptor_loader.devices: - device_client .DeleteDevice(DeviceId(**device['device_id'])) - - for context_uuid, topology_list in descriptor_loader.topologies.items(): - for topology in topology_list: - context_client.RemoveTopology(TopologyId(**topology['topology_id'])) - - for context in descriptor_loader.contexts: - context_client.RemoveContext(ContextId(**context['context_id'])) - - - # ----- List entities - Ensure database is empty again ------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git 
a/src/tests/benchmark/policy/tests/test_functional_create_service.py b/src/tests/benchmark/policy/tests/test_functional_create_service.py index 919f81979..dd7761f38 100644 --- a/src/tests/benchmark/policy/tests/test_functional_create_service.py +++ b/src/tests/benchmark/policy/tests/test_functional_create_service.py @@ -13,83 +13,61 @@ # limitations under the License. import logging, random -from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextId, Empty +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.descriptor.Loader import DescriptorLoader from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from monitoring.client.MonitoringClient import MonitoringClient -from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import +from tests.Fixtures import context_client, monitoring_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM -from .Fixtures import osm_wim # pylint: disable=unused-import +from .Fixtures import osm_wim # pylint: disable=unused-import from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value -DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value - DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - # ----- Create Service --------------------------------------------------------------------------------------------- + # Create Connectivity Service service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) 
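    # For context, the whole OSM-driven flow exercised across the create/delete
    # tests is sketched below (calls as defined by tests.tools.mock_osm.MockOSM):
    #   service_uuid = osm_wim.create_connectivity_service(
    #       WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)    # request the service
    #   osm_wim.get_connectivity_service_status(service_uuid)   # query its status
    #   osm_wim.delete_connectivity_service(service_uuid)       # teardown (removal test)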
osm_wim.get_connectivity_service_status(service_uuid) - - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2*num_services # OLS & L3NM => (L3NM + TAPI) - - for service in response.services: - service_id = service.service_id - response = context_client.ListConnections(service_id) - LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), - grpc_message_to_json_string(response))) - assert len(response.connections) == 1 # one connection per service + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice + + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI + + for service in response.services: + service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + + if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: + assert len(response.connections) == 1 # 1 connection per service + elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: + assert len(response.connections) == 1 # 1 connection per service + else: + str_service = grpc_message_to_json_string(service) + raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) def test_scenario_kpi_values_created( diff --git a/src/tests/benchmark/policy/tests/test_functional_delete_service.py b/src/tests/benchmark/policy/tests/test_functional_delete_service.py index 6f6ca6029..4fffc115e 100644 --- a/src/tests/benchmark/policy/tests/test_functional_delete_service.py +++ b/src/tests/benchmark/policy/tests/test_functional_delete_service.py @@ -14,86 +14,61 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader -from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import 
json_context_id from context.client.ContextClient import ContextClient from tests.Fixtures import context_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM -from .Fixtures import osm_wim # pylint: disable=unused-import - +from .Fixtures import osm_wim # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value -DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value - DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' - +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice - descriptor_loader = DescriptorLoader(descriptors) + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - l3nm_service_uuids = set() - response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) - assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) + service_uuids = set() for service in response.services: service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: + assert len(response.connections) == 1 # 1 connection per service service_uuid = service_id.service_uuid.uuid - l3nm_service_uuids.add(service_uuid) + service_uuids.add(service_uuid) osm_wim.conn_info[service_uuid] = {} - - response = context_client.ListConnections(service_id) - LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), - grpc_message_to_json_string(response))) - assert len(response.connections) == 1 # one connection per service + elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: + assert len(response.connections) == 1 # 1 connection per service + else: + str_service = grpc_message_to_json_string(service) + raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) # Identify service to delete - assert len(l3nm_service_uuids) == 1 # assume a single L3NM service has been 
created - l3nm_service_uuid = set(l3nm_service_uuids).pop() - - - # ----- Delete Service --------------------------------------------------------------------------------------------- - osm_wim.delete_connectivity_service(l3nm_service_uuid) - - - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies + assert len(service_uuids) == 1 # assume a single L3NM service has been created + service_uuid = set(service_uuids).pop() - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices + # Delete Connectivity Service + osm_wim.delete_connectivity_service(service_uuid) - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py index 3b7b5009c..05691d0b2 100644 --- a/src/tests/ecoc22/tests/test_functional_bootstrap.py +++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py @@ -14,8 +14,8 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty -from common.tests.LoadScenario import load_scenario_from_descriptor +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -31,45 +31,15 @@ def test_scenario_bootstrap( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure database is empty ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 + validate_empty_scenario(context_client) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - - # ----- Load Scenario ---------------------------------------------------------------------------------------------- - descriptor_loader = 
load_scenario_from_descriptor( - DESCRIPTOR_FILE, context_client, device_client, None, None) - - - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py index 3e8b5ea65..088c19799 100644 --- a/src/tests/ecoc22/tests/test_functional_cleanup.py +++ b/src/tests/ecoc22/tests/test_functional_cleanup.py @@ -14,8 +14,8 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId -from common.tools.descriptor.Loader import DescriptorLoader +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -27,64 +27,18 @@ LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) -def test_services_removed( +def test_scenario_cleanup( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert 
len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- - for link in descriptor_loader.links: - context_client.RemoveLink(LinkId(**link['link_id'])) - - for device in descriptor_loader.devices: - device_client .DeleteDevice(DeviceId(**device['device_id'])) - - for context_uuid, topology_list in descriptor_loader.topologies.items(): - for topology in topology_list: - context_client.RemoveTopology(TopologyId(**topology['topology_id'])) - - for context in descriptor_loader.contexts: - context_client.RemoveContext(ContextId(**context['context_id'])) - - - # ----- List entities - Ensure database is empty again ------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py index 6dd4eb827..dab9c7eb1 100644 --- a/src/tests/ecoc22/tests/test_functional_create_service.py +++ b/src/tests/ecoc22/tests/test_functional_create_service.py @@ -14,7 +14,7 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id @@ -31,57 +31,23 @@ DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert 
len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Create Service --------------------------------------------------------------------------------------------- + # Create Connectivity Service service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) osm_wim.get_connectivity_service_status(service_uuid) - - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py index 5cfdc3473..710e1a817 100644 --- a/src/tests/ecoc22/tests/test_functional_delete_service.py +++ b/src/tests/ecoc22/tests/test_functional_delete_service.py @@ -14,14 +14,14 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader -from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient -from tests.Fixtures import context_client # pylint: disable=unused-import +from tests.Fixtures import context_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM -from .Fixtures import osm_wim # pylint: disable=unused-import +from .Fixtures import osm_wim # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -30,44 +30,27 @@ DESCRIPTOR_FILE = 'ecoc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = 
ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice - service_uuids = set() response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) assert len(response.services) == 3 # 1xL2NM + 2xTAPI + service_uuids = set() for service in response.services: service_id = service.service_id - - if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM: - service_uuid = service_id.service_uuid.uuid - service_uuids.add(service_uuid) - osm_wim.conn_info[service_uuid] = {} - response = context_client.ListConnections(service_id) LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM: assert len(response.connections) == 2 # 2 connections per service (primary + backup) + service_uuid = service_id.service_uuid.uuid + service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: assert len(response.connections) == 1 # 1 connection per service else: @@ -78,34 +61,14 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p assert len(service_uuids) == 1 # assume a single L2NM service has been created service_uuid = set(service_uuids).pop() - - # ----- Delete Service --------------------------------------------------------------------------------------------- + # Delete Connectivity Service osm_wim.delete_connectivity_service(service_uuid) - - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in 
descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py index ad2d5703a..ca1882aaa 100644 --- a/src/tests/ofc22/tests/test_functional_bootstrap.py +++ b/src/tests/ofc22/tests/test_functional_bootstrap.py @@ -16,7 +16,7 @@ import logging, time from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, Empty from common.proto.monitoring_pb2 import KpiDescriptorList -from common.tests.LoadScenario import load_scenario_from_descriptor +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -33,45 +33,15 @@ def test_scenario_bootstrap( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure database is empty ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 + validate_empty_scenario(context_client) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - - # ----- Load Scenario ---------------------------------------------------------------------------------------------- - descriptor_loader = load_scenario_from_descriptor( - DESCRIPTOR_FILE, context_client, device_client, None, None) - - - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 + descriptor_loader 
= DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py index d38b653b2..122526840 100644 --- a/src/tests/ofc22/tests/test_functional_cleanup.py +++ b/src/tests/ofc22/tests/test_functional_cleanup.py @@ -14,8 +14,8 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId -from common.tools.descriptor.Loader import DescriptorLoader +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -27,64 +27,18 @@ LOGGER.setLevel(logging.DEBUG) DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) -def test_services_removed( +def test_scenario_cleanup( context_client : ContextClient, # pylint: disable=redefined-outer-name device_client : DeviceClient, # pylint: disable=redefined-outer-name ) -> None: - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, _ in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == 0 - - for context_uuid, _ in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == 0 - - # This scenario assumes no services are created beforehand + # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- - for link in descriptor_loader.links: - context_client.RemoveLink(LinkId(**link['link_id'])) - - for device in descriptor_loader.devices: - device_client .DeleteDevice(DeviceId(**device['device_id'])) - - for context_uuid, topology_list in descriptor_loader.topologies.items(): - for topology in topology_list: - 
context_client.RemoveTopology(TopologyId(**topology['topology_id'])) - - for context in descriptor_loader.contexts: - context_client.RemoveContext(ContextId(**context['context_id'])) - - - # ----- List entities - Ensure database is empty again ------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == 0 - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py index 92e0a74f9..dd7761f38 100644 --- a/src/tests/ofc22/tests/test_functional_create_service.py +++ b/src/tests/ofc22/tests/test_functional_create_service.py @@ -21,7 +21,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from monitoring.client.MonitoringClient import MonitoringClient -from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import +from tests.Fixtures import context_client, monitoring_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM from .Fixtures import osm_wim # pylint: disable=unused-import from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE @@ -33,61 +33,27 @@ DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # Verify the scenario has no 
services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - - # ----- Create Service --------------------------------------------------------------------------------------------- + # Create Connectivity Service service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) osm_wim.get_connectivity_service_status(service_uuid) - - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI for service in response.services: service_id = service.service_id @@ -104,7 +70,6 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) - def test_scenario_kpi_values_created( monitoring_client: MonitoringClient, # pylint: disable=redefined-outer-name ) -> None: diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py index 1811f219a..4fffc115e 100644 --- a/src/tests/ofc22/tests/test_functional_delete_service.py +++ b/src/tests/ofc22/tests/test_functional_delete_service.py @@ -14,10 +14,10 @@ import logging from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.proto.context_pb2 import ContextId, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader -from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from tests.Fixtures import context_client # pylint: disable=unused-import from tests.tools.mock_osm.MockOSM import MockOSM @@ -30,44 +30,27 @@ DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure service is created ------------------------------------------------------------------ - with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: - descriptors = f.read() - - descriptor_loader = DescriptorLoader(descriptors) - - response = context_client.ListContexts(Empty()) - assert 
len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links + # Ensure slices and services are created + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 1 # OSM slice - service_uuids = set() response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) + assert len(response.services) == 2 # 1xL3NM + 1xTAPI + service_uuids = set() for service in response.services: service_id = service.service_id - - if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: - service_uuid = service_id.service_uuid.uuid - service_uuids.add(service_uuid) - osm_wim.conn_info[service_uuid] = {} - response = context_client.ListConnections(service_id) LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: assert len(response.connections) == 1 # 1 connection per service + service_uuid = service_id.service_uuid.uuid + service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} elif service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: assert len(response.connections) == 1 # 1 connection per service else: @@ -78,34 +61,14 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p assert len(service_uuids) == 1 # assume a single L3NM service has been created service_uuid = set(service_uuids).pop() - - # ----- Delete Service --------------------------------------------------------------------------------------------- + # Delete Connectivity Service osm_wim.delete_connectivity_service(service_uuid) - - # ----- List entities - Ensure service is removed ------------------------------------------------------------------ - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == descriptor_loader.num_contexts - - for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): - response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) - assert len(response.topologies) == num_topologies - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == descriptor_loader.num_devices - - response = context_client.ListLinks(Empty()) - assert len(response.links) == descriptor_loader.num_links - - for context_uuid, num_services in descriptor_loader.num_services.items(): - response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) - assert len(response.services) == num_services - - for context_uuid, num_slices in descriptor_loader.num_slices.items(): - response = context_client.ListSlices(ContextId(**json_context_id(context_uuid))) - assert len(response.slices) == num_slices - - # This scenario assumes no services are created beforehand + # 
Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader(descriptors_file=DESCRIPTOR_FILE, context_client=context_client) + descriptor_loader.validate() -- GitLab From 9243a1e639575af5fadd9e8625501d3d6fb7f6ed Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 11:31:41 +0000 Subject: [PATCH 13/43] WebUI component: - moved old files --- src/webui/{ => old}/grafana_backup_dashboard.json | 0 src/webui/{ => old}/grafana_dashboard.json | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename src/webui/{ => old}/grafana_backup_dashboard.json (100%) rename src/webui/{ => old}/grafana_dashboard.json (100%) diff --git a/src/webui/grafana_backup_dashboard.json b/src/webui/old/grafana_backup_dashboard.json similarity index 100% rename from src/webui/grafana_backup_dashboard.json rename to src/webui/old/grafana_backup_dashboard.json diff --git a/src/webui/grafana_dashboard.json b/src/webui/old/grafana_dashboard.json similarity index 100% rename from src/webui/grafana_dashboard.json rename to src/webui/old/grafana_dashboard.json -- GitLab From b3341edc3f631d564fa229d702207cc2ec513903 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 12:17:50 +0000 Subject: [PATCH 14/43] Monitoring component: - updated name of variable containing target table --- src/monitoring/.gitlab-ci.yml | 2 +- src/monitoring/service/MonitoringServiceServicerImpl.py | 8 ++++---- src/monitoring/tests/test_unitary.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/monitoring/.gitlab-ci.yml b/src/monitoring/.gitlab-ci.yml index ff620c534..7c3a14975 100644 --- a/src/monitoring/.gitlab-ci.yml +++ b/src/monitoring/.gitlab-ci.yml @@ -56,7 +56,7 @@ unit_test monitoring: - docker pull questdb/questdb - docker run --name questdb -d -p 9000:9000 -p 9009:9009 -p 8812:8812 -p 9003:9003 -e QDB_CAIRO_COMMIT_LAG=1000 -e QDB_CAIRO_MAX_UNCOMMITTED_ROWS=100000 --network=teraflowbridge --rm questdb/questdb - sleep 10 - - docker run --name $IMAGE_NAME -d -p 7070:7070 --env METRICSDB_HOSTNAME=questdb --env METRICSDB_ILP_PORT=9009 --env METRICSDB_REST_PORT=9000 --env METRICSDB_TABLE=monitoring -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - docker run --name $IMAGE_NAME -d -p 7070:7070 --env METRICSDB_HOSTNAME=questdb --env METRICSDB_ILP_PORT=9009 --env METRICSDB_REST_PORT=9000 --env METRICSDB_TABLE_MONITORING_KPIS=tfs_monitoring_kpis -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - sleep 30 - docker ps -a - docker logs $IMAGE_NAME diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py index 0bbce1509..f408734df 100644 --- a/src/monitoring/service/MonitoringServiceServicerImpl.py +++ b/src/monitoring/service/MonitoringServiceServicerImpl.py @@ -47,7 +47,7 @@ MONITORING_INCLUDEKPI_COUNTER = Counter('monitoring_includekpi_counter', 'Monito METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME") METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT") METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT") -METRICSDB_TABLE = os.environ.get("METRICSDB_TABLE") +METRICSDB_TABLE_MONITORING_KPIS = os.environ.get("METRICSDB_TABLE_MONITORING_KPIS") class 
MonitoringServiceServicerImpl(MonitoringServiceServicer): def __init__(self, name_mapping : NameMapping): @@ -57,7 +57,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): self.management_db = ManagementDBTools.ManagementDB('monitoring.db') self.deviceClient = DeviceClient() self.metrics_db = MetricsDBTools.MetricsDB( - METRICSDB_HOSTNAME, name_mapping, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE) + METRICSDB_HOSTNAME, name_mapping, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE_MONITORING_KPIS) self.subs_manager = SubscriptionManager(self.metrics_db) self.alarm_manager = AlarmManager(self.metrics_db) LOGGER.info('MetricsDB initialized') @@ -592,8 +592,8 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id))) response.kpi_id.kpi_id.uuid = "NoID" else: - query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE} WHERE kpi_id = '{kpi_id}' " \ - f"LATEST ON timestamp PARTITION BY kpi_id" + query = f"SELECT kpi_id, timestamp, kpi_value FROM {METRICSDB_TABLE_MONITORING_KPIS} " \ + f"WHERE kpi_id = '{kpi_id}' LATEST ON timestamp PARTITION BY kpi_id" data = self.metrics_db.run_query(query) LOGGER.debug(data) if len(data) == 0: diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index 1428b0ed5..c883f9d14 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -75,7 +75,7 @@ os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_POR METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME') METRICSDB_ILP_PORT = os.environ.get('METRICSDB_ILP_PORT') METRICSDB_REST_PORT = os.environ.get('METRICSDB_REST_PORT') -METRICSDB_TABLE = os.environ.get('METRICSDB_TABLE') +METRICSDB_TABLE_MONITORING_KPIS = os.environ.get('METRICSDB_TABLE_MONITORING_KPIS') LOGGER = logging.getLogger(__name__) @@ -193,7 +193,7 @@ def management_db(): def metrics_db(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name return monitoring_service.monitoring_servicer.metrics_db #_metrics_db = MetricsDBTools.MetricsDB( - # METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE) + # METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE_MONITORING_KPIS) #return _metrics_db @pytest.fixture(scope='session') -- GitLab From cfd8a206f9b873d42cea695748e6f4c7d2df9d2b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 12:28:02 +0000 Subject: [PATCH 15/43] WebUI component: - updated dashboard's table name - renamed dashboard's file name --- ...sql.json => grafana_db_mon_kpis_psql.json} | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) rename src/webui/{grafana_dashboard_psql.json => grafana_db_mon_kpis_psql.json} (91%) diff --git a/src/webui/grafana_dashboard_psql.json b/src/webui/grafana_db_mon_kpis_psql.json similarity index 91% rename from src/webui/grafana_dashboard_psql.json rename to src/webui/grafana_db_mon_kpis_psql.json index ec89c1647..750e5254e 100644 --- a/src/webui/grafana_dashboard_psql.json +++ b/src/webui/grafana_db_mon_kpis_psql.json @@ -33,7 +33,7 @@ { "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, "fieldConfig": { "defaults": { @@ -162,14 +162,14 @@ { "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, "format": "time_series", "group": [], "hide": false, "metricColumn": "kpi_value", "rawQuery": true, - 
"rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n tfs_monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_name IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n timestamp", + "rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_name, endpoint_name, kpi_sample_type\r\nFROM\r\n tfs_monitoring_kpis\r\nWHERE\r\n $__timeFilter(timestamp) AND device_name IN (${device_name}) AND endpoint_name IN (${endpoint_name}) AND kpi_sample_type IN (${kpi_sample_type})\r\nGROUP BY\r\n device_name, endpoint_name, kpi_sample_type\r\nORDER BY\r\n timestamp", "refId": "A", "select": [ [ @@ -181,7 +181,7 @@ } ] ], - "table": "monitoring", + "table": "tfs_monitoring_kpis", "timeColumn": "timestamp", "where": [ { @@ -227,16 +227,16 @@ }, "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, - "definition": "SELECT DISTINCT device_name FROM tfs_monitoring;", + "definition": "SELECT DISTINCT device_name FROM tfs_monitoring_kpis;", "hide": 0, "includeAll": true, "label": "Device", "multi": true, "name": "device_name", "options": [], - "query": "SELECT DISTINCT device_name FROM tfs_monitoring;", + "query": "SELECT DISTINCT device_name FROM tfs_monitoring_kpis;", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -255,16 +255,16 @@ }, "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, - "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})", + "definition": "SELECT DISTINCT endpoint_name FROM tfs_monitoring_kpis WHERE device_name IN (${device_name})", "hide": 0, "includeAll": true, "label": "EndPoint", "multi": true, "name": "endpoint_name", "options": [], - "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring WHERE device_name IN (${device_name})", + "query": "SELECT DISTINCT endpoint_name FROM tfs_monitoring_kpis WHERE device_name IN (${device_name})", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -283,16 +283,16 @@ }, "datasource": { "type": "postgres", - "uid": "questdb" + "uid": "questdb-mon-kpi" }, - "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;", + "definition": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring_kpis;", "hide": 0, "includeAll": true, "label": "Kpi Sample Type", "multi": true, "name": "kpi_sample_type", "options": [], - "query": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring;", + "query": "SELECT DISTINCT kpi_sample_type FROM tfs_monitoring_kpis;", "refresh": 2, "regex": "", "skipUrlSync": false, @@ -308,7 +308,7 @@ "timepicker": {}, "timezone": "utc", "title": "L3 Monitoring", - "uid": "tf-l3-monit", + "uid": "tfs-l3-monit", "version": 6, "weekStart": "" } -- GitLab From e5e54f18fa589df50a3d9b7d66a57adc75cf0c5b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 12:43:20 +0000 Subject: [PATCH 16/43] Deploy scripts: - enabled support to drop QuestDB tables during deploy - renamed variable QDB_TABLE to QDB_TABLE_MONITORING_KPIS - renamed default table name tfs_monitoring to tfs_monitoring_kpis - renamed secret attribute METRICSDB_TABLE to METRICSDB_TABLE_MONITORING_KPIS - improved log messages of tfs.sh - updated deploy scripts accordingly - removed unused variables and commands - corrected documentation of some variables --- deploy/all.sh | 40 ++++-------- deploy/crdb.sh | 3 - deploy/nats.sh | 8 --- 
deploy/qdb.sh | 42 ++++++------ deploy/tfs.sh | 49 +++++++++----- my_deploy.sh | 18 ++--- src/tests/benchmark/policy/deploy_specs.sh | 76 ++++++++++++++++++---- src/tests/ecoc22/deploy_specs.sh | 19 +++--- src/tests/ofc22/deploy_specs.sh | 19 +++--- 9 files changed, 155 insertions(+), 119 deletions(-) diff --git a/deploy/all.sh b/deploy/all.sh index a99607f5b..09239afed 100755 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -25,14 +25,14 @@ # By default, assume internal MicroK8s registry is used. export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} -# If not already set, set the list of components you want to build images for, and deploy. +# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy. # By default, only basic components are deployed -export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"} +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"} # If not already set, set the tag you want to use for your images. export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} -# If not already set, set the name of the Kubernetes namespace to deploy to. +# If not already set, set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} # If not already set, set additional manifest files to be applied after the deployment @@ -41,7 +41,7 @@ export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""} # If not already set, set the new Grafana admin password export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} -# If not already set, disable skip-build flag. +# If not already set, disable skip-build flag to rebuild the Docker images. # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} @@ -60,12 +60,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} # If not already set, set the database name to be used by Context. export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} -# If not already set, set the name of the secret where CockroachDB data and credentials will be stored. -export CRDB_SECRET_NAME=${CRDB_SECRET_NAME:-"crdb-data"} - -# If not already set, set the namespace where the secret containing CockroachDB data and credentials will be stored. -export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}} - # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'. # "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while # checking/deploying CockroachDB. @@ -78,7 +72,7 @@ export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}} # Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"} -# If not already set, disable flag for dropping database if exists. +# If not already set, disable flag for dropping database, if it exists. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! # If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while # checking/deploying CockroachDB. @@ -96,12 +90,6 @@ export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""} # If not already set, set the namespace where NATS will be deployed. 
export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} -# If not already set, set the name of the secret where NATS data and credentials will be stored. -export NATS_SECRET_NAME=${NATS_SECRET_NAME:-"nats-data"} - -# If not already set, set the namespace where the secret containing NATS data and credentials will be stored. -export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}} - # If not already set, disable flag for re-deploying NATS from scratch. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION! # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS. @@ -113,20 +101,20 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""} # If not already set, set the namespace where QuestDB will be deployed. export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"} -# If not already set, set the database username to be used by Monitoring. +# If not already set, set the database username to be used for QuestDB. export QDB_USERNAME=${QDB_USERNAME:-"admin"} -# If not already set, set the database user's password to be used by Monitoring. +# If not already set, set the database user's password to be used for QuestDB. export QDB_PASSWORD=${QDB_PASSWORD:-"quest"} -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"} +# If not already set, set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} -## If not already set, disable flag for dropping table if exists. -## WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION! -## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while -## checking/deploying QuestDB. -#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""} +# If not already set, disable flag for dropping tables if they exist. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION! +# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variable +# QDB_TABLE_MONITORING_KPIS will be dropped while checking/deploying QuestDB. +export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""} # If not already set, disable flag for re-deploying QuestDB from scratch. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! 
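As a usage sketch (not mandated by the patch; it only assumes the variable defaults and the deploy entry point defined above), recreating the KPI table from scratch on the next deployment boils down to exporting the new flag before running the script:

    # Hypothetical one-off invocation; QDB_DROP_TABLES_IF_EXIST and deploy/all.sh as defined in this patch.
    export QDB_DROP_TABLES_IF_EXIST="YES"   # drop tfs_monitoring_kpis while checking/deploying QuestDB
    ./deploy/all.sh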
diff --git a/deploy/crdb.sh b/deploy/crdb.sh index 98d011f19..4e8cfe2c3 100755 --- a/deploy/crdb.sh +++ b/deploy/crdb.sh @@ -66,9 +66,6 @@ CRDB_MANIFESTS_PATH="manifests/cockroachdb" # Create a tmp folder for files modified during the deployment TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" mkdir -p $TMP_MANIFESTS_FOLDER -TMP_LOGS_FOLDER="$TMP_FOLDER/logs" -mkdir -p $TMP_LOGS_FOLDER -CRDB_LOG_FILE="$TMP_LOGS_FOLDER/crdb_deploy.log" function crdb_deploy_single() { echo "CockroachDB Namespace" diff --git a/deploy/nats.sh b/deploy/nats.sh index 115a18530..9edbc7765 100755 --- a/deploy/nats.sh +++ b/deploy/nats.sh @@ -31,14 +31,6 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""} # Automated steps start here ######################################################################################################################## -# Constants -TMP_FOLDER="./tmp" -NATS_MANIFESTS_PATH="manifests/nats" - -# Create a tmp folder for files modified during the deployment -TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" -mkdir -p $TMP_MANIFESTS_FOLDER - function nats_deploy_single() { echo "NATS Namespace" echo ">>> Create NATS Namespace (if missing)" diff --git a/deploy/qdb.sh b/deploy/qdb.sh index d9a4de353..a65408804 100755 --- a/deploy/qdb.sh +++ b/deploy/qdb.sh @@ -21,20 +21,20 @@ # If not already set, set the namespace where QuestDB will be deployed. export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"} -# If not already set, set the database username to be used by Monitoring. +# If not already set, set the database username to be used for QuestDB. export QDB_USERNAME=${QDB_USERNAME:-"admin"} -# If not already set, set the database user's password to be used by Monitoring. +# If not already set, set the database user's password to be used for QuestDB. export QDB_PASSWORD=${QDB_PASSWORD:-"quest"} -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"} +# If not already set, set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} -## If not already set, disable flag for dropping table if exists. -## WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION! -## If QDB_DROP_TABLE_IF_EXISTS is "YES", the table pointed by variable QDB_TABLE will be dropped while -## checking/deploying QuestDB. -#export QDB_DROP_TABLE_IF_EXISTS=${QDB_DROP_TABLE_IF_EXISTS:-""} +# If not already set, disable flag for dropping tables if they exist. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE TABLE INFORMATION! +# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variable +# QDB_TABLE_MONITORING_KPIS will be dropped while checking/deploying QuestDB. +export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""} # If not already set, disable flag for re-deploying QuestDB from scratch. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! 
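# Sketch of an optional check (assumes a QuestDB version supporting SHOW TABLES; host/port
# discovery mirrors the qdb_drop_tables function below): the same REST /exec endpoint used
# to drop the table can list the remaining ones and confirm the drop took effect:
#   curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=SHOW+TABLES+;"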
@@ -52,9 +52,6 @@ QDB_MANIFESTS_PATH="manifests/questdb" # Create a tmp folder for files modified during the deployment TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" -TMP_LOGS_FOLDER="$TMP_FOLDER/logs" -QDB_LOG_FILE="$TMP_LOGS_FOLDER/qdb_deploy.log" -mkdir -p $TMP_LOGS_FOLDER function qdb_deploy() { echo "QuestDB Namespace" @@ -147,19 +144,18 @@ function qdb_undeploy() { echo } -# TODO: implement method to drop table -#function qdb_drop_table() { -# echo "Drop table if exists" -# QDB_CLIENT_URL="postgresql://${QDB_USERNAME}:${QDB_PASSWORD}@questdb-0:${QDB_SQL_PORT}/defaultdb?sslmode=require" -# kubectl exec -it --namespace ${QDB_NAMESPACE} questdb-0 -- \ -# ./qdb sql --certs-dir=/qdb/qdb-certs --url=${QDB_CLIENT_URL} \ -# --execute "DROP TABLE IF EXISTS ${QDB_TABLE};" -# echo -#} +function qdb_drop_tables() { + QDB_HOST=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.clusterIP}') + QDB_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') + + echo "Drop tables, if exist" + curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;" + echo +} if [ "$QDB_REDEPLOY" == "YES" ]; then qdb_undeploy -#elif [ "$QDB_DROP_TABLE_IF_EXISTS" == "YES" ]; then -# qdb_drop_table +elif [ "$QDB_DROP_TABLES_IF_EXIST" == "YES" ]; then + qdb_drop_tables fi qdb_deploy diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 1f62adcd5..2bacc8cac 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -18,18 +18,21 @@ # Read deployment settings ######################################################################################################################## + +# ----- TeraFlowSDN ------------------------------------------------------------ + # If not already set, set the URL of the Docker registry where the images will be uploaded to. # By default, assume internal MicroK8s registry is used. export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} -# If not already set, set the list of components you want to build images for, and deploy. +# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy. # By default, only basic components are deployed -export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"} +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"} # If not already set, set the tag you want to use for your images. export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} -# If not already set, set the name of the Kubernetes namespace to deploy to. +# If not already set, set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} # If not already set, set additional manifest files to be applied after the deployment @@ -38,10 +41,13 @@ export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""} # If not already set, set the new Grafana admin password export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} -# If not already set, disable skip-build flag. +# If not already set, disable skip-build flag to rebuild the Docker images. # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} + +# ----- CockroachDB ------------------------------------------------------------ + # If not already set, set the namespace where CockroackDB will be deployed. 
export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"} @@ -54,20 +60,26 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} # If not already set, set the database name to be used by Context. export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} + +# ----- NATS ------------------------------------------------------------------- + # If not already set, set the namespace where NATS will be deployed. export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} + +# ----- QuestDB ---------------------------------------------------------------- + # If not already set, set the namespace where QuestDB will be deployed. export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"} -# If not already set, set the database username to be used by Monitoring. +# If not already set, set the database username to be used for QuestDB. export QDB_USERNAME=${QDB_USERNAME:-"admin"} -# If not already set, set the database user's password to be used by Monitoring. +# If not already set, set the database user's password to be used for QuestDB. export QDB_PASSWORD=${QDB_PASSWORD:-"quest"} -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE=${QDB_TABLE:-"tfs_monitoring"} +# If not already set, set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} ######################################################################################################################## @@ -118,7 +130,7 @@ kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type=' --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \ --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \ --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \ - --from-literal=METRICSDB_TABLE=${QDB_TABLE} \ + --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \ --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \ --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD} printf "\n" @@ -301,7 +313,8 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring" # Configure Grafana Admin Password # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}" - echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..." + + echo ">> Updating Grafana 'admin' password..." curl -X PUT -H "Content-Type: application/json" -d '{ "oldPassword": "'${GRAFANA_PASSWORD}'", "newPassword": "'${TFS_GRAFANA_PASSWORD}'", @@ -314,15 +327,14 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring" echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/ - # TODO: replace user, password and database by variables to be saved QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}" - echo "Creating a datasource..." + echo ">> Creating datasources..." curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{ "access" : "proxy", "type" : "postgres", - "name" : "questdb", + "name" : "questdb-mon-kpi", "url" : "'${QDB_HOST_PORT}'", - "database" : "'${QDB_TABLE}'", + "database" : "'${QDB_TABLE_MONITORING_KPIS}'", "user" : "'${QDB_USERNAME}'", "basicAuth": false, "isDefault": true, @@ -342,16 +354,17 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring" }' ${GRAFANA_URL_UPDATED}/api/datasources echo - # Create Monitoring Dashboard + echo ">> Creating dashboards..." 
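  # Sketch of an optional sanity check (assumes jq, already used below): after the POST,
  # the imported dashboard can be fetched back by its uid to confirm the import, e.g.:
  #   curl -s ${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit | jq '.dashboard.title'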
# Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/ - curl -X POST -H "Content-Type: application/json" \ - -d '@src/webui/grafana_dashboard_psql.json' \ + curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \ ${GRAFANA_URL_UPDATED}/api/dashboards/db echo - DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tf-l3-monit" + echo ">> Starring dashboards..." + DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit" DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} + echo printf "\n\n" fi diff --git a/my_deploy.sh b/my_deploy.sh index 6f0e64afe..1efea75bb 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -56,7 +56,7 @@ export CRDB_DATABASE="tfs" # See ./deploy/all.sh or ./deploy/crdb.sh for additional details export CRDB_DEPLOY_MODE="single" -# Disable flag for dropping database, if exists. +# Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. @@ -74,20 +74,20 @@ export NATS_REDEPLOY="" # ----- QuestDB ---------------------------------------------------------------- -# If not already set, set the namespace where QuestDB will be deployed. +# Set the namespace where QuestDB will be deployed. export QDB_NAMESPACE="qdb" -# If not already set, set the database username to be used by Monitoring. +# Set the database username to be used for QuestDB. export QDB_USERNAME="admin" -# If not already set, set the database user's password to be used by Monitoring. +# Set the database user's password to be used for QuestDB. export QDB_PASSWORD="quest" -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE="tfs_monitoring" +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" -## If not already set, disable flag for dropping table if exists. -#export QDB_DROP_TABLE_IF_EXISTS="" +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" -# If not already set, disable flag for re-deploying QuestDB from scratch. +# Disable flag for re-deploying QuestDB from scratch. export QDB_REDEPLOY="" diff --git a/src/tests/benchmark/policy/deploy_specs.sh b/src/tests/benchmark/policy/deploy_specs.sh index 12a45ef92..7d408f003 100755 --- a/src/tests/benchmark/policy/deploy_specs.sh +++ b/src/tests/benchmark/policy/deploy_specs.sh @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,21 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Set the URL of your local Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. 
-# Supported components are: -# context device automation policy service compute monitoring webui -# interdomain slice pathcomp dlt -# dbscanserving opticalattackmitigator opticalattackdetector -# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" -# Set the name of the Kubernetes namespace to deploy to. +# Set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE="tfs" # Set additional manifest files to be applied after the deployment @@ -35,6 +33,60 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" # Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" -# If not already set, disable skip-build flag. -# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. -export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroachDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set the database name to be used by Context. +export CRDB_DATABASE="tfs" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh index 874774e1c..6c3d9db66 100755 --- a/src/tests/ecoc22/deploy_specs.sh +++ b/src/tests/ecoc22/deploy_specs.sh @@ -20,7 +20,6 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator" export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. 
@@ -57,7 +56,7 @@ export CRDB_DATABASE="tfs" # See ./deploy/all.sh or ./deploy/crdb.sh for additional details export CRDB_DEPLOY_MODE="single" -# Disable flag for dropping database, if exists. +# Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. @@ -75,20 +74,20 @@ export NATS_REDEPLOY="" # ----- QuestDB ---------------------------------------------------------------- -# If not already set, set the namespace where QuestDB will be deployed. +# Set the namespace where QuestDB will be deployed. export QDB_NAMESPACE="qdb" -# If not already set, set the database username to be used by Monitoring. +# Set the database username to be used for QuestDB. export QDB_USERNAME="admin" -# If not already set, set the database user's password to be used by Monitoring. +# Set the database user's password to be used for QuestDB. export QDB_PASSWORD="quest" -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE="tfs_monitoring" +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" -## If not already set, disable flag for dropping table if exists. -#export QDB_DROP_TABLE_IF_EXISTS="" +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" -# If not already set, disable flag for re-deploying QuestDB from scratch. +# Disable flag for re-deploying QuestDB from scratch. export QDB_REDEPLOY="" diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh index 874774e1c..6c3d9db66 100755 --- a/src/tests/ofc22/deploy_specs.sh +++ b/src/tests/ofc22/deploy_specs.sh @@ -20,7 +20,6 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -#export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator" export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. @@ -57,7 +56,7 @@ export CRDB_DATABASE="tfs" # See ./deploy/all.sh or ./deploy/crdb.sh for additional details export CRDB_DEPLOY_MODE="single" -# Disable flag for dropping database, if exists. +# Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. @@ -75,20 +74,20 @@ export NATS_REDEPLOY="" # ----- QuestDB ---------------------------------------------------------------- -# If not already set, set the namespace where QuestDB will be deployed. +# Set the namespace where QuestDB will be deployed. export QDB_NAMESPACE="qdb" -# If not already set, set the database username to be used by Monitoring. +# Set the database username to be used for QuestDB. export QDB_USERNAME="admin" -# If not already set, set the database user's password to be used by Monitoring. +# Set the database user's password to be used for QuestDB. export QDB_PASSWORD="quest" -# If not already set, set the table name to be used by Monitoring. -export QDB_TABLE="tfs_monitoring" +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" -## If not already set, disable flag for dropping table if exists. -#export QDB_DROP_TABLE_IF_EXISTS="" +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" -# If not already set, disable flag for re-deploying QuestDB from scratch. 
+# Disable flag for re-deploying QuestDB from scratch. export QDB_REDEPLOY="" -- GitLab From 7b66a4b18e3bdd16d0d7d42bc090db7ca6121410 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 13:31:25 +0000 Subject: [PATCH 17/43] Context component: - removed unneeded log message --- src/context/service/database/Constraint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py index 768108d9b..b37d0dcad 100644 --- a/src/context/service/database/Constraint.py +++ b/src/context/service/database/Constraint.py @@ -107,7 +107,7 @@ def upsert_constraints( #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}) #LOGGER.warning('delete stmt={:s}'.format(str(str_stmt))) constraint_deletes = session.execute(stmt) - LOGGER.warning('constraint_deletes.rowcount={:s}'.format(str(constraint_deletes.rowcount))) + #LOGGER.warning('constraint_deletes.rowcount={:s}'.format(str(constraint_deletes.rowcount))) delete_affected = int(constraint_deletes.rowcount) > 0 upsert_affected = False -- GitLab From e94ec782fff459850af83692ba4bd80b008ebc3d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 16:37:49 +0000 Subject: [PATCH 18/43] Common - Tools - gRPC - Constraints: - implemented missing methods --- src/common/tools/grpc/Constraints.py | 59 ++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 3 deletions(-) diff --git a/src/common/tools/grpc/Constraints.py b/src/common/tools/grpc/Constraints.py index 53f7dfd98..07f0b7782 100644 --- a/src/common/tools/grpc/Constraints.py +++ b/src/common/tools/grpc/Constraints.py @@ -17,7 +17,7 @@ import json -from typing import Any, Dict, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple from common.proto.context_pb2 import Constraint, EndPointId from common.tools.grpc.Tools import grpc_message_to_json_string @@ -137,7 +137,31 @@ def update_constraint_endpoint_priority(constraints, endpoint_id : EndPointId, p constraint.endpoint_priority.priority = priority return constraint -def update_constraint_sla_availability(constraints, num_disjoint_paths : int, all_active : bool) -> Constraint: +def update_constraint_sla_capacity(constraints, capacity_gbps : float) -> Constraint: + for constraint in constraints: + if constraint.WhichOneof('constraint') != 'sla_capacity': continue + break # found, end loop + else: + # not found, add it + constraint = constraints.add() # pylint: disable=no-member + + constraint.sla_capacity.capacity_gbps = capacity_gbps + return constraint + +def update_constraint_sla_latency(constraints, e2e_latency_ms : float) -> Constraint: + for constraint in constraints: + if constraint.WhichOneof('constraint') != 'sla_latency': continue + break # found, end loop + else: + # not found, add it + constraint = constraints.add() # pylint: disable=no-member + + constraint.sla_latency.e2e_latency_ms = e2e_latency_ms + return constraint + +def update_constraint_sla_availability( + constraints, num_disjoint_paths : int, all_active : bool, availability : float +) -> Constraint: for constraint in constraints: if constraint.WhichOneof('constraint') != 'sla_availability': continue break # found, end loop @@ -147,8 +171,21 @@ def update_constraint_sla_availability(constraints, num_disjoint_paths : int, al constraint.sla_availability.num_disjoint_paths = num_disjoint_paths constraint.sla_availability.all_active = all_active + constraint.sla_availability.availability = availability return 
constraint +def update_constraint_sla_isolation(constraints, isolation_levels : List[int]) -> Constraint: + for constraint in constraints: + if constraint.WhichOneof('constraint') != 'sla_isolation': continue + break # found, end loop + else: + # not found, add it + constraint = constraints.add() # pylint: disable=no-member + + for isolation_level in isolation_levels: + if isolation_level in constraint.sla_isolation.isolation_level: continue + constraint.sla_isolation.isolation_level.append(isolation_level) + return constraint def copy_constraints(source_constraints, target_constraints): for source_constraint in source_constraints: @@ -189,11 +226,27 @@ def copy_constraints(source_constraints, target_constraints): priority = source_constraint.endpoint_priority.priority update_constraint_endpoint_priority(target_constraints, endpoint_id, priority) + elif constraint_kind == 'sla_capacity': + sla_capacity = source_constraint.sla_capacity + capacity_gbps = sla_capacity.capacity_gbps + update_constraint_sla_capacity(target_constraints, capacity_gbps) + + elif constraint_kind == 'sla_latency': + sla_latency = source_constraint.sla_latency + e2e_latency_ms = sla_latency.e2e_latency_ms + update_constraint_sla_latency(target_constraints, e2e_latency_ms) + elif constraint_kind == 'sla_availability': sla_availability = source_constraint.sla_availability num_disjoint_paths = sla_availability.num_disjoint_paths all_active = sla_availability.all_active - update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active) + availability = sla_availability.availability + update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active, availability) + + elif constraint_kind == 'sla_isolation': + sla_isolation = source_constraint.sla_isolation + isolation_levels = sla_isolation.isolation_level + update_constraint_sla_isolation(target_constraints, isolation_levels) else: raise NotImplementedError('Constraint({:s})'.format(grpc_message_to_json_string(source_constraint))) -- GitLab From 16f4fa1391055dfeaf4fe94f73fd65dab2c8d440 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 16:38:07 +0000 Subject: [PATCH 19/43] Common - Tools - Object Factory - Constraints: - implemented missing methods --- src/common/tools/object_factory/Constraint.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/common/tools/object_factory/Constraint.py b/src/common/tools/object_factory/Constraint.py index e3c5129fd..ef00e3872 100644 --- a/src/common/tools/object_factory/Constraint.py +++ b/src/common/tools/object_factory/Constraint.py @@ -13,7 +13,7 @@ # limitations under the License. 
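# Usage sketch (illustrative values only, mirroring the unitary tests under
# src/pathcomp/frontend/tests): the helpers below build the JSON constraint
# bodies carried in a service's 'service_constraints' list, e.g.:
#   constraints = [
#       json_constraint_sla_capacity(10.0),               # {'sla_capacity': {'capacity_gbps': 10.0}}
#       json_constraint_sla_latency(12.0),                # {'sla_latency': {'e2e_latency_ms': 12.0}}
#       json_constraint_sla_availability(2, True, 50.0),  # 2 disjoint paths, all active, 50% availability
#   ]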
import json -from typing import Any, Dict, Union +from typing import Any, Dict, List, Union def json_constraint_custom(constraint_type : str, constraint_value : Union[str, Dict[str, Any]]) -> Dict: if not isinstance(constraint_value, str): constraint_value = json.dumps(constraint_value, sort_keys=True) @@ -29,5 +29,16 @@ def json_constraint_endpoint_location_gps(endpoint_id : Dict, latitude : float, def json_constraint_endpoint_priority(endpoint_id : Dict, priority : int) -> Dict: return {'endpoint_priority': {'endpoint_id': endpoint_id, 'priority': priority}} -def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool) -> Dict: - return {'sla_availability': {'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active}} +def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool, availability : float) -> Dict: + return {'sla_availability': { + 'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active, 'availability': availability + }} + +def json_constraint_sla_capacity(capacity_gbps : float) -> Dict: + return {'sla_capacity': {'capacity_gbps': capacity_gbps}} + +def json_constraint_sla_isolation(isolation_levels : List[int]) -> Dict: + return {'sla_isolation': {'isolation_level': isolation_levels}} + +def json_constraint_sla_latency(e2e_latency_ms : float) -> Dict: + return {'sla_latency': {'e2e_latency_ms': e2e_latency_ms}} -- GitLab From d2ef8068c2356f3a0396342ce714859841897a4d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 16:39:05 +0000 Subject: [PATCH 20/43] Context component: - fixed check of endpoints in SetService - fixed check of endpoints in SetSlice - fixed slice-subslice relationship --- src/context/service/database/Service.py | 7 +++++-- src/context/service/database/Slice.py | 11 +++++++---- src/context/service/database/models/SliceModel.py | 4 ++++ 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index 4b63a4ae5..a81a80c3c 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -77,8 +77,11 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]: service_endpoints_data : List[Dict] = list() for i,endpoint_id in enumerate(request.service_endpoint_ids): endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid - if endpoint_context_uuid not in {raw_context_uuid, context_uuid}: + if len(endpoint_context_uuid) == 0: + endpoint_context_uuid = context_get_uuid(request.service_id.context_id, allow_random=False) + else: + endpoint_context_uuid = context_get_uuid(endpoint_id.topology_id.context_id, allow_random=False) + if endpoint_context_uuid != context_uuid: raise InvalidArgumentException( 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), endpoint_context_uuid, diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index 7c291e33d..b841b9cc8 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -77,8 +77,11 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: slice_endpoints_data : List[Dict] = list() for i,endpoint_id in enumerate(request.slice_endpoint_ids): endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid - 
if endpoint_context_uuid not in {raw_context_uuid, context_uuid}: + if len(endpoint_context_uuid) == 0: + endpoint_context_uuid = context_get_uuid(request.slice_id.context_id, allow_random=False) + else: + endpoint_context_uuid = context_get_uuid(endpoint_id.topology_id.context_id, allow_random=False) + if endpoint_context_uuid != context_uuid: raise InvalidArgumentException( 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), endpoint_context_uuid, @@ -210,13 +213,13 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: )).delete() if len(slice_subslice_uuids) > 0: num_deletes += session.query(SliceSubSliceModel)\ - .filter_by(and_( + .filter(and_( SliceSubSliceModel.slice_uuid == slice_uuid, SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) )).delete() if len(slice_endpoint_uuids) > 0: num_deletes += session.query(SliceEndPointModel)\ - .filter_by(and_( + .filter(and_( SliceEndPointModel.slice_uuid == slice_uuid, SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids) )).delete() diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py index 1a562bcd9..6943861c8 100644 --- a/src/context/service/database/models/SliceModel.py +++ b/src/context/service/database/models/SliceModel.py @@ -100,3 +100,7 @@ class SliceSubSliceModel(_Base): slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True) + + slice = relationship( 'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined') + subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined') -- GitLab From 5c95b835f15263c1223cdc00f9e5a0c3d4cbba5f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 16:40:03 +0000 Subject: [PATCH 21/43] PathComp component - FrontEnd: - removed unused import in unitary test --- src/pathcomp/frontend/tests/test_unitary.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py index fd14c8a7a..d06386231 100644 --- a/src/pathcomp/frontend/tests/test_unitary.py +++ b/src/pathcomp/frontend/tests/test_unitary.py @@ -23,7 +23,6 @@ from common.tools.object_factory.Device import json_device_id from common.tools.object_factory.EndPoint import json_endpoint_id from common.tools.object_factory.Service import json_service_l3nm_planned from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient from pathcomp.frontend.client.PathCompClient import PathCompClient # Scenarios: -- GitLab From bfacc00a3e1264c21b7042fd009c422a91b31ea1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 17:08:02 +0000 Subject: [PATCH 22/43] Scripts: - added missing script to show load generator logs --- scripts/show_logs_load_generator.sh | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 scripts/show_logs_load_generator.sh diff --git a/scripts/show_logs_load_generator.sh b/scripts/show_logs_load_generator.sh new file mode 100755 index 000000000..d0f2527d7 --- /dev/null +++ b/scripts/show_logs_load_generator.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/load-generatorservice -- GitLab From 36491867d7adc8aad907a6b12501168b8422c804 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 17:26:11 +0000 Subject: [PATCH 23/43] Common - Tools - Descriptor: - corrected parsing of slice descriptors --- src/common/tools/descriptor/Tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/tools/descriptor/Tools.py b/src/common/tools/descriptor/Tools.py index 9d6275748..f03c635b8 100644 --- a/src/common/tools/descriptor/Tools.py +++ b/src/common/tools/descriptor/Tools.py @@ -72,7 +72,7 @@ def format_service_custom_config_rules(service : Dict) -> Dict: return service def format_slice_custom_config_rules(slice_ : Dict) -> Dict: - config_rules = slice_.get('service_config', {}).get('config_rules', []) + config_rules = slice_.get('slice_config', {}).get('config_rules', []) config_rules = format_custom_config_rules(config_rules) slice_['slice_config']['config_rules'] = config_rules return slice_ -- GitLab From 86fc7c5c95b55247db4294f11ec17b0e73e0e86c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:29:19 +0000 Subject: [PATCH 24/43] PathComp component - Frontend: - Updated to use specific SLA Constraints together with Custom constraints - Updated related unitary tests --- .../algorithms/KDisjointPathAlgorithm.py | 17 +++++++++-- .../algorithms/tools/ComposeRequest.py | 29 ++++++++++++------- src/pathcomp/frontend/tests/Objects_A_B_C.py | 6 ++-- .../frontend/tests/Objects_DC_CSGW_TN.py | 6 ++-- .../frontend/tests/Objects_DC_CSGW_TN_OLS.py | 6 ++-- src/pathcomp/frontend/tests/test_unitary.py | 12 ++++---- .../misc/example-results-kdisjointpaths.json | 4 +-- 7 files changed, 49 insertions(+), 31 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py index a9fc4fa3d..a6d39ee36 100644 --- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py +++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py @@ -54,13 +54,15 @@ class KDisjointPathAlgorithm(_Algorithm): self.services_details.setdefault(service_key, service_details) for constraint in service.service_constraints: - if constraint.WhichOneof('constraint') == 'custom': + kind = constraint.WhichOneof('constraint') + + if kind == 'custom': 
constraint_type = constraint.custom.constraint_type if constraint_type not in CUSTOM_CONSTRAINTS: continue constraint_value = constraint.custom.constraint_value constraints[constraint_type] = constraint_value - if constraint.WhichOneof('constraint') == 'endpoint_location': + elif kind == 'endpoint_location': endpoint_id = constraint.endpoint_location.endpoint_id device_uuid = endpoint_id.device_id.device_uuid.uuid device_uuid = self.device_name_mapping.get(device_uuid, device_uuid) @@ -73,7 +75,7 @@ class KDisjointPathAlgorithm(_Algorithm): site_id = constraint.endpoint_location.location.region endpoints.setdefault((device_uuid, endpoint_uuid), dict())['site_id'] = site_id - if constraint.WhichOneof('constraint') == 'endpoint_priority': + elif kind == 'endpoint_priority': endpoint_id = constraint.endpoint_priority.endpoint_id device_uuid = endpoint_id.device_id.device_uuid.uuid device_uuid = self.device_name_mapping.get(device_uuid, device_uuid) @@ -82,9 +84,18 @@ class KDisjointPathAlgorithm(_Algorithm): priority = constraint.endpoint_priority.priority endpoints.setdefault((device_uuid, endpoint_uuid), dict())['priority'] = priority + elif kind == 'sla_capacity': + capacity_gbps = constraint.sla_capacity.capacity_gbps + constraints['bandwidth[gbps]'] = str(capacity_gbps) + + elif kind == 'sla_latency': + e2e_latency_ms = constraint.sla_latency.e2e_latency_ms + constraints['latency[ms]'] = str(e2e_latency_ms) + # TODO: ensure these constraints are provided in the request if 'bandwidth[gbps]' not in constraints: constraints['bandwidth[gbps]'] = '20.0' if 'latency[ms]' not in constraints: constraints['latency[ms]'] = '20.0' + #if 'jitter[us]' not in constraints: constraints['jitter[us]'] = '50.0' def get_link_from_endpoint(self, endpoint : Dict) -> Tuple[Dict, Link]: device_uuid = endpoint['device_id'] diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py index bfb4da05f..ee85f0bb0 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py @@ -73,17 +73,22 @@ def compose_latency_characteristics(fixed_latency_characteristic : str) -> Dict: return {'fixed-latency-characteristic': fixed_latency_characteristic} def compose_constraint(constraint : Constraint) -> Dict: - if constraint.WhichOneof('constraint') != 'custom': - str_constraint = grpc_message_to_json_string(constraint) - LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint)) - return None - constraint_type = constraint.custom.constraint_type - if constraint_type in {'diversity'}: - str_constraint = grpc_message_to_json_string(constraint) - LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint)) - return None - constraint_value = constraint.custom.constraint_value - return {'constraint_type': constraint_type, 'constraint_value': constraint_value} + kind = constraint.WhichOneof('constraint') + if kind == 'custom': + constraint_type = constraint.custom.constraint_type + if constraint_type in {'bandwidth[gbps]', 'latency[ms]', 'jitter[us]'}: + constraint_value = constraint.custom.constraint_value + return {'constraint_type': constraint_type, 'constraint_value': constraint_value} + elif kind == 'sla_capacity': + capacity_gbps = constraint.sla_capacity.capacity_gbps + return {'constraint_type': 'bandwidth[gbps]', 'constraint_value': str(capacity_gbps)} + elif kind == 'sla_latency': + e2e_latency_ms = 
constraint.sla_latency.e2e_latency_ms + return {'constraint_type': 'latency[ms]', 'constraint_value': str(e2e_latency_ms)} + + str_constraint = grpc_message_to_json_string(constraint) + LOGGER.warning('Ignoring unsupported Constraint({:s})'.format(str_constraint)) + return None def compose_device(grpc_device : Device) -> Dict: device_uuid = grpc_device.device_id.device_uuid.uuid @@ -144,6 +149,8 @@ def compose_service(grpc_service : Service) -> Dict: constraints.append({'constraint_type': 'bandwidth[gbps]', 'constraint_value': '20.0'}) if 'latency[ms]' not in constraint_types: constraints.append({'constraint_type': 'latency[ms]', 'constraint_value': '20.0'}) + #if 'jitter[us]' not in constraint_types: + # constraints.append({'constraint_type': 'jitter[us]', 'constraint_value': '50.0'}) return { 'serviceId': service_id, diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py index ca9764a34..f26d74ce4 100644 --- a/src/pathcomp/frontend/tests/Objects_A_B_C.py +++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py @@ -13,7 +13,7 @@ # limitations under the License. from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME -from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id from common.tools.object_factory.EndPoint import json_endpoints @@ -97,8 +97,8 @@ LINK_C2_C3_ID, LINK_C2_C3 = compose_link(DEVICE_C2_ENDPOINTS[1], DEVICE_C3_ENDPO # ----- Service -------------------------------------------------------------------------------------------------------- SERVICE_A1_B1 = compose_service(DEVICE_A1_ENDPOINTS[2], DEVICE_B1_ENDPOINTS[2], constraints=[ - json_constraint_custom('bandwidth[gbps]', 10.0), - json_constraint_custom('latency[ms]', 12.0), + json_constraint_sla_capacity(10.0), + json_constraint_sla_latency(12.0), ]) # ----- Containers ----------------------------------------------------------------------------------------------------- diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py index 1d057c10e..9ee784e1f 100644 --- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py +++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py @@ -13,7 +13,7 @@ # limitations under the License. 
from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME -from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, @@ -139,8 +139,8 @@ LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4 # ----- Service -------------------------------------------------------------------------------------------------------- SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[ - json_constraint_custom('bandwidth[gbps]', 10.0), - json_constraint_custom('latency[ms]', 20.0), + json_constraint_sla_capacity(10.0), + json_constraint_sla_latency(20.0), ]) # ----- Containers ----------------------------------------------------------------------------------------------------- diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py index 8f6e88719..71510d088 100644 --- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py +++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py @@ -14,7 +14,7 @@ import uuid from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME -from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, @@ -149,8 +149,8 @@ LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3 # ----- Service -------------------------------------------------------------------------------------------------------- SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[ - json_constraint_custom('bandwidth[gbps]', 10.0), - json_constraint_custom('latency[ms]', 20.0), + json_constraint_sla_capacity(10.0), + json_constraint_sla_latency(20.0), ]) # ----- Containers ----------------------------------------------------------------------------------------------------- diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py index d06386231..8088259b8 100644 --- a/src/pathcomp/frontend/tests/test_unitary.py +++ b/src/pathcomp/frontend/tests/test_unitary.py @@ -18,7 +18,7 @@ from common.proto.pathcomp_pb2 import PathCompRequest from common.tools.grpc.Tools import grpc_message_to_json from common.tools.object_factory.Constraint import ( json_constraint_custom, json_constraint_endpoint_location_region, json_constraint_endpoint_priority, - json_constraint_sla_availability) + json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_latency) from common.tools.object_factory.Device import json_device_id from common.tools.object_factory.EndPoint import json_endpoint_id from common.tools.object_factory.Service import json_service_l3nm_planned @@ -89,8 +89,8 @@ def test_request_service_shortestpath( request_services = copy.deepcopy(SERVICES) #request_services[0]['service_constraints'] = [ - # json_constraint_custom('bandwidth[gbps]', 1000.0), - # json_constraint_custom('latency[ms]', 1200.0), 
+ # json_constraint_sla_capacity(1000.0), + # json_constraint_sla_latency(1200.0), #] pathcomp_request = PathCompRequest(services=request_services) pathcomp_request.shortest_path.Clear() # hack to select the shortest path algorithm that has no attributes @@ -201,9 +201,9 @@ def test_request_service_kdisjointpath( ] endpoint_ids, constraints = [], [ - json_constraint_custom('bandwidth[gbps]', 10.0), - json_constraint_custom('latency[ms]', 12.0), - json_constraint_sla_availability(2, True), + json_constraint_sla_capacity(10.0), + json_constraint_sla_latency(12.0), + json_constraint_sla_availability(2, True, 50.0), json_constraint_custom('diversity', {'end-to-end-diverse': 'all-other-accesses'}), ] diff --git a/src/pathcomp/misc/example-results-kdisjointpaths.json b/src/pathcomp/misc/example-results-kdisjointpaths.json index 9eda25d48..c1dbf3a3c 100644 --- a/src/pathcomp/misc/example-results-kdisjointpaths.json +++ b/src/pathcomp/misc/example-results-kdisjointpaths.json @@ -64,8 +64,8 @@ ], "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, "service_constraints": [ - {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, - {"custom": {"constraint_type": "latency[ms]", "constraint_value": "12.0"}} + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 12.0}} ], "service_config": {"config_rules": []} } -- GitLab From 35d2fcfe440865762879f3a6c07da79b8ebcdf96 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:31:13 +0000 Subject: [PATCH 25/43] Service component: - Corrected inference of pathcomp algorithm and parameters - Removed unneeded log messages/reduced log levels --- .../service/ServiceServiceServicerImpl.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 622abeee8..0b2e07601 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -38,8 +38,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def CreateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: - LOGGER.info('[CreateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) - if len(request.service_endpoint_ids) > 0: unexpected_endpoints = [] for service_endpoint_id in request.service_endpoint_ids: @@ -85,8 +83,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def UpdateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: - LOGGER.info('[UpdateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) - # Set service status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the service is # being modified. 
context_client = ContextClient() @@ -112,27 +108,30 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): service_id_with_uuids = context_client.SetService(service) service_with_uuids = context_client.GetService(service_id_with_uuids) - num_disjoint_paths = None + num_disjoint_paths = 0 for constraint in request.service_constraints: if constraint.WhichOneof('constraint') == 'sla_availability': num_disjoint_paths = constraint.sla_availability.num_disjoint_paths break + num_disjoint_paths = 1 if num_disjoint_paths is None or num_disjoint_paths == 0 else num_disjoint_paths + num_expected_endpoints = num_disjoint_paths * 2 + tasks_scheduler = TasksScheduler(self.service_handler_factory) - if len(service_with_uuids.service_endpoint_ids) >= (2 if num_disjoint_paths is None else 4): + if len(service_with_uuids.service_endpoint_ids) >= num_expected_endpoints: pathcomp_request = PathCompRequest() pathcomp_request.services.append(service_with_uuids) # pylint: disable=no-member - if num_disjoint_paths is None: + if num_disjoint_paths is None or num_disjoint_paths in {0, 1}: pathcomp_request.shortest_path.Clear() # pylint: disable=no-member else: pathcomp_request.k_disjoint_path.num_disjoint = num_disjoint_paths # pylint: disable=no-member - LOGGER.info('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) + LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) pathcomp = PathCompClient() pathcomp_reply = pathcomp.Compute(pathcomp_request) pathcomp.close() - LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) + LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) # Feed TaskScheduler with this path computation reply. TaskScheduler identifies inter-dependencies among # the services and connections retrieved and produces a schedule of tasks (an ordered list of tasks to be @@ -144,8 +143,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DeleteService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: - LOGGER.info('[DeleteService] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) - context_client = ContextClient() # Set service status to "SERVICESTATUS_PENDING_REMOVAL" to ensure rest of components are aware the service is -- GitLab From a6c1340afecf29d82bf4c4620f8904ac68c21105 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:32:12 +0000 Subject: [PATCH 26/43] Common - Context Queries - InterDomain: - Corrected Constraint definitions --- src/common/tools/context_queries/InterDomain.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py index 7317cc793..edb640708 100644 --- a/src/common/tools/context_queries/InterDomain.py +++ b/src/common/tools/context_queries/InterDomain.py @@ -136,13 +136,11 @@ def compute_interdomain_path( service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add() service_endpoint_id.CopyFrom(endpoint_id) - constraint_bw = pathcomp_req_svc.service_constraints.add() - constraint_bw.custom.constraint_type = 'bandwidth[gbps]' - constraint_bw.custom.constraint_value = '10.0' + constraint_sla_capacity = pathcomp_req_svc.service_constraints.add() + constraint_sla_capacity.sla_capacity.capacity_gbps = 10.0 - constraint_lat = pathcomp_req_svc.service_constraints.add() - 
constraint_lat.custom.constraint_type = 'latency[ms]' - constraint_lat.custom.constraint_value = '100.0' + constraint_sla_latency = pathcomp_req_svc.service_constraints.add() + constraint_sla_latency.sla_latency.e2e_latency_ms = 100.0 LOGGER.debug('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req))) pathcomp_rep = pathcomp_client.Compute(pathcomp_req) -- GitLab From da51b8957c48058e36d0d0aeaebe5e335117b0fe Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:32:56 +0000 Subject: [PATCH 27/43] Context component: - Corrected Constraint definitions in unitary tests --- src/context/tests/Objects.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index 8634c1f30..6b52ef4c0 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -17,7 +17,7 @@ from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.object_factory.ConfigRule import json_config_rule_set from common.tools.object_factory.Connection import json_connection, json_connection_id -from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.Constraint import json_constraint_custom, json_constraint_sla_latency from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import json_device_id, json_device_packetrouter_disabled from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id @@ -95,7 +95,7 @@ def compose_service( for device_id, endpoint_name in endpoint_ids ] constraints = [ - json_constraint_custom('latency[ms]', str(latency_ms)), + json_constraint_sla_latency(latency_ms), json_constraint_custom('jitter[us]', str(jitter_us)), ] config_rules = [ @@ -128,7 +128,7 @@ def compose_slice( for device_id, endpoint_name in endpoint_ids ] constraints = [ - json_constraint_custom('latency[ms]', str(latency_ms)), + json_constraint_sla_latency(latency_ms), json_constraint_custom('jitter[us]', str(jitter_us)), ] config_rules = [ -- GitLab From 3dfc06c45e3530a6e91f08f21a6ef1ca1970f2f0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:34:02 +0000 Subject: [PATCH 28/43] Test tools - Mock_OSM: - Corrected Constraint definitions in example descriptors --- src/tests/tools/mock_sdn_ctrl/service_descriptor.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json index a4109bc7b..2d4ed3eaf 100644 --- a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json +++ b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json @@ -12,8 +12,8 @@ {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "EXT"}} ], "service_constraints": [ - {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, - {"custom": {"constraint_type": "latency[ms]", "constraint_value": "15.2"}} + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 15.2}} ], "service_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "/settings", "resource_value": { -- GitLab From 16ffcc1996aac863b27f327ae0e5abba2d17295d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:34:50 +0000 Subject: [PATCH 29/43] Hackfest service descriptors: - Corrected Constraint definitions --- 
hackfest/tfs-descriptors/old/service.json | 4 ++-- hackfest/tfs-descriptors/service-l3vpn.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hackfest/tfs-descriptors/old/service.json b/hackfest/tfs-descriptors/old/service.json index a25d0171d..26804dcf1 100644 --- a/hackfest/tfs-descriptors/old/service.json +++ b/hackfest/tfs-descriptors/old/service.json @@ -18,8 +18,8 @@ {"device_id":{"device_uuid":{"uuid":"R2"}},"endpoint_uuid":{"uuid":"1/3"}} ], "service_constraints":[ - {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, - {"custom": {"constraint_type": "latency[ms]", "constraint_value": "20.0"}} + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 20.0}} ], "service_config":{"config_rules":[]} } diff --git a/hackfest/tfs-descriptors/service-l3vpn.json b/hackfest/tfs-descriptors/service-l3vpn.json index 457ba1a50..723453b8b 100644 --- a/hackfest/tfs-descriptors/service-l3vpn.json +++ b/hackfest/tfs-descriptors/service-l3vpn.json @@ -12,8 +12,8 @@ {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "1/2"}} ], "service_constraints": [ - {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, - {"custom": {"constraint_type": "latency[ms]", "constraint_value": "15.2"}} + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 15.2}} ], "service_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "/settings", "resource_value": { -- GitLab From bc2b0e9ef1740e575b083eaf301cd4ad36a652f9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:39:35 +0000 Subject: [PATCH 30/43] Context component: - Corrected slice-to-subslice db schema - Corrected removal of endpoints --- src/context/service/database/Slice.py | 2 +- src/context/service/database/models/SliceModel.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index b841b9cc8..80af759de 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -180,7 +180,7 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: if len(request.slice_constraints) > 0: raise NotImplementedError('UnsetSlice: removal of constraints') if len(request.slice_config.config_rules) > 0: raise NotImplementedError('UnsetSlice: removal of config rules') - if len(request.slice_endpoint_ids) > 0: raise NotImplementedError('UnsetSlice: removal of endpoints') + #if len(request.slice_endpoint_ids) > 0: raise NotImplementedError('UnsetSlice: removal of endpoints') slice_endpoint_uuids : Set[str] = set() for i,endpoint_id in enumerate(request.slice_endpoint_ids): diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py index 6943861c8..458bc714a 100644 --- a/src/context/service/database/models/SliceModel.py +++ b/src/context/service/database/models/SliceModel.py @@ -98,8 +98,8 @@ class SliceServiceModel(_Base): class SliceSubSliceModel(_Base): __tablename__ = 'slice_subslice' - slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) - subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True) + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True) + subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True) slice = relationship( 'SliceModel', 
foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined') -- GitLab From 42f566fcd84079d670255aacb26293571f2c4b60 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:41:31 +0000 Subject: [PATCH 31/43] WebUI component: - Minor cosmetic changes in code - Addition of missing close methods --- src/webui/service/main/routes.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index dcbbf71a6..32cefddf3 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import base64, json, logging, re +import base64, json, logging #, re from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request from common.proto.context_pb2 import ContextList, Empty, TopologyId, TopologyList from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications @@ -55,7 +55,7 @@ def process_descriptors(descriptors): def home(): context_client.connect() device_client.connect() - context_topology_form: ContextTopologyForm = ContextTopologyForm() + context_topology_form = ContextTopologyForm() context_topology_form.context_topology.choices.append(('', 'Select...')) contexts : ContextList = context_client.ListContexts(Empty()) @@ -87,6 +87,10 @@ def home(): #session['topology_name'] = topology_name MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.' flash(MSG, 'success') + + context_client.close() + device_client.close() + return redirect(url_for('main.home')) #match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid) @@ -101,7 +105,7 @@ def home(): if 'context_topology_uuid' in session: context_topology_form.context_topology.data = session['context_topology_uuid'] - descriptor_form: DescriptorForm = DescriptorForm() + descriptor_form = DescriptorForm() try: if descriptor_form.validate_on_submit(): process_descriptors(descriptor_form.descriptors) -- GitLab From 41f314a85f07429fa6eaecd46aaaa6637c563c4d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:44:50 +0000 Subject: [PATCH 32/43] Load-Generator component: - Corrected Constraint definitions - Corrected termination conditions - Added randomness in constraint values --- .../load_gen/RequestGenerator.py | 66 +++++++++++++------ .../load_gen/RequestScheduler.py | 30 ++++++--- 2 files changed, 68 insertions(+), 28 deletions(-) diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index 906c26e98..a6d14307e 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -14,9 +14,11 @@ import logging, json, random, threading from typing import Dict, Optional, Set, Tuple -from common.proto.context_pb2 import Empty, TopologyId +from common.proto.context_pb2 import Empty, IsolationLevelEnum, TopologyId from common.tools.grpc.Tools import grpc_message_to_json -from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.Constraint import ( + json_constraint_sla_availability, json_constraint_sla_capacity, json_constraint_sla_isolation, + json_constraint_sla_latency) from common.tools.object_factory.ConfigRule import json_config_rule_set from common.tools.object_factory.Device import json_device_id from 
common.tools.object_factory.EndPoint import json_endpoint_id @@ -36,7 +38,7 @@ class RequestGenerator: def __init__(self, parameters : Parameters) -> None: self._parameters = parameters self._lock = threading.Lock() - self._num_requests = 0 + self._num_generated = 0 self._available_device_endpoints : Dict[str, Set[str]] = dict() self._used_device_endpoints : Dict[str, Dict[str, str]] = dict() self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict() @@ -45,6 +47,12 @@ class RequestGenerator: self._device_data : Dict[str, Dict] = dict() self._device_endpoint_data : Dict[str, Dict[str, Dict]] = dict() + @property + def num_generated(self): return self._num_generated + + @property + def infinite_loop(self): return self._parameters.num_requests == 0 + def initialize(self) -> None: with self._lock: self._available_device_endpoints.clear() @@ -96,17 +104,14 @@ class RequestGenerator: if self._parameters.record_to_dlt: record_link_to_dlt(dlt_connector_client, dlt_domain_id, link.link_id) - @property - def num_requests_generated(self): return self._num_requests - def dump_state(self) -> None: with self._lock: _endpoints = { device_uuid:[endpoint_uuid for endpoint_uuid in endpoint_uuids] for device_uuid,endpoint_uuids in self._available_device_endpoints.items() } - LOGGER.info('[dump_state] available_device_endpoints = {:s}'.format(json.dumps(_endpoints))) - LOGGER.info('[dump_state] used_device_endpoints = {:s}'.format(json.dumps(self._used_device_endpoints))) + LOGGER.debug('[dump_state] available_device_endpoints = {:s}'.format(json.dumps(_endpoints))) + LOGGER.debug('[dump_state] used_device_endpoints = {:s}'.format(json.dumps(self._used_device_endpoints))) def _use_device_endpoint( self, service_uuid : str, request_type : RequestType, endpoint_types : Optional[Set[str]] = None, @@ -167,10 +172,13 @@ class RequestGenerator: self._used_device_endpoints.setdefault(device_uuid, dict()).pop(endpoint_uuid, None) self._available_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid) - def compose_request(self) -> Optional[Dict]: + def compose_request(self) -> Tuple[bool, Optional[Dict]]: # completed, request with self._lock: - self._num_requests += 1 - num_request = self._num_requests + if not self.infinite_loop and (self._num_generated >= self._parameters.num_requests): + LOGGER.info('Generation Done!') + return True, None # completed + self._num_generated += 1 + num_request = self._num_generated #request_uuid = str(uuid.uuid4()) request_uuid = 'svc_{:d}'.format(num_request) @@ -181,9 +189,9 @@ class RequestGenerator: if request_type in { RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, RequestType.SERVICE_TAPI, RequestType.SERVICE_MW }: - return self._compose_service(num_request, request_uuid, request_type) + return False, self._compose_service(num_request, request_uuid, request_type) elif request_type in {RequestType.SLICE_L2NM, RequestType.SLICE_L3NM}: - return self._compose_slice(num_request, request_uuid, request_type) + return False, self._compose_slice(num_request, request_uuid, request_type) def _compose_service(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]: # choose source endpoint @@ -222,10 +230,17 @@ class RequestGenerator: ] if request_type == RequestType.SERVICE_L2NM: + availability = int(random.uniform(00.0, 99.99) * 100.0) / 100.0 + capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0 + e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0 + constraints = [ - json_constraint_custom('bandwidth[gbps]', 
'10.0'), - json_constraint_custom('latency[ms]', '20.0'), + json_constraint_sla_availability(1, True, availability), + json_constraint_sla_capacity(capacity_gbps), + json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), + json_constraint_sla_latency(e2e_latency_ms), ] + vlan_id = num_request % 1000 circuit_id = '{:03d}'.format(vlan_id) @@ -260,10 +275,17 @@ class RequestGenerator: request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) elif request_type == RequestType.SERVICE_L3NM: + availability = int(random.uniform(00.0, 99.99) * 100.0) / 100.0 + capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0 + e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0 + constraints = [ - json_constraint_custom('bandwidth[gbps]', '10.0'), - json_constraint_custom('latency[ms]', '20.0'), + json_constraint_sla_availability(1, True, availability), + json_constraint_sla_capacity(capacity_gbps), + json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), + json_constraint_sla_latency(e2e_latency_ms), ] + vlan_id = num_request % 1000 bgp_as = 60000 + (num_request % 10000) bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333) @@ -357,9 +379,15 @@ class RequestGenerator: json_endpoint_id(json_device_id(src_device_uuid), src_endpoint_uuid), json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid), ] + + availability = int(random.uniform(00.0, 99.99) * 100.0) / 100.0 + capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0 + e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0 constraints = [ - json_constraint_custom('bandwidth[gbps]', '10.0'), - json_constraint_custom('latency[ms]', '20.0'), + json_constraint_sla_availability(1, True, availability), + json_constraint_sla_capacity(capacity_gbps), + json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), + json_constraint_sla_latency(e2e_latency_ms), ] if request_type == RequestType.SLICE_L2NM: diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py index 775da1580..13ae70deb 100644 --- a/src/load_generator/load_gen/RequestScheduler.py +++ b/src/load_generator/load_gen/RequestScheduler.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy, logging, pytz, random +import copy, logging, pytz, random, threading from apscheduler.executors.pool import ThreadPoolExecutor from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.schedulers.blocking import BlockingScheduler @@ -46,14 +46,18 @@ class RequestScheduler: timezone=pytz.utc) self._parameters = parameters self._generator = generator + self._running = threading.Event() + + @property + def num_generated(self): return max(self._generator.num_generated, self._parameters.num_requests) + + @property + def infinite_loop(self): return self._generator.infinite_loop + + @property + def running(self): return self._running.is_set() def _schedule_request_setup(self) -> None: - infinite_loop = self._parameters.num_requests == 0 - num_requests_generated = self._generator.num_requests_generated - 1 # because it first increases, then checks - if not infinite_loop and (num_requests_generated >= self._parameters.num_requests): - LOGGER.info('Generation Done!') - #self._scheduler.shutdown() - return iat = random.expovariate(1.0 / self._parameters.inter_arrival_time) run_date = datetime.utcnow() + timedelta(seconds=iat) self._scheduler.add_job( @@ -66,16 +70,24 @@ class RequestScheduler: self._request_teardown, args=(request,), trigger='date', run_date=run_date, timezone=pytz.utc) def start(self): + self._running.set() self._schedule_request_setup() self._scheduler.start() def stop(self): self._scheduler.shutdown() + self._running.clear() def _request_setup(self) -> None: - self._schedule_request_setup() + completed,request = self._generator.compose_request() + if completed: + LOGGER.info('Generation Done!') + #self._scheduler.shutdown() + self._running.clear() + return + else: + self._schedule_request_setup() - request = self._generator.compose_request() if request is None: LOGGER.warning('No resources available to compose new request') return -- GitLab From 9dd7b10be8ed4bb38079eb520e5f0da2cd92cfb9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:46:48 +0000 Subject: [PATCH 33/43] Slice component: - Removed unneeded log messages/reduced log levels --- src/slice/service/SliceServiceServicerImpl.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index 21d820089..d49f1c547 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -24,7 +24,7 @@ from common.tools.grpc.ConfigRules import copy_config_rules from common.tools.grpc.Constraints import copy_constraints from common.tools.grpc.EndPointIds import copy_endpoint_ids from common.tools.grpc.ServiceIds import update_service_ids -from common.tools.grpc.Tools import grpc_message_to_json_string +#from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from interdomain.client.InterdomainClient import InterdomainClient from service.client.ServiceClient import ServiceClient @@ -109,13 +109,13 @@ class SliceServiceServicerImpl(SliceServiceServicer): service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN for config_rule in request.slice_config.config_rules: - LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule))) + #LOGGER.debug('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule))) config_rule_kind = config_rule.WhichOneof('config_rule') - LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind))) + 
#LOGGER.debug('config_rule_kind: {:s}'.format(str(config_rule_kind))) if config_rule_kind != 'custom': continue custom = config_rule.custom resource_key = custom.resource_key - LOGGER.info('resource_key: {:s}'.format(str(resource_key))) + #LOGGER.debug('resource_key: {:s}'.format(str(resource_key))) # TODO: parse resource key with regular expression, e.g.: # m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s) @@ -123,21 +123,21 @@ class SliceServiceServicerImpl(SliceServiceServicer): if not resource_key.endswith('/settings'): continue resource_value = json.loads(custom.resource_value) - LOGGER.info('resource_value: {:s}'.format(str(resource_value))) + #LOGGER.debug('resource_value: {:s}'.format(str(resource_value))) if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: if (resource_value.get('address_ip') is not None and \ resource_value.get('address_prefix') is not None): service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM - LOGGER.info('is L3') + #LOGGER.debug('is L3') else: service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM - LOGGER.info('is L2') + #LOGGER.debug('is L2') break if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM - LOGGER.info('assume L2') + #LOGGER.debug('assume L2') service_client.UpdateService(service_request) -- GitLab From ab663513ccda05340aa88222deca2e03887a961e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 15:15:35 +0000 Subject: [PATCH 34/43] Load-Generator component: - Corrected termination conditions --- src/load_generator/load_gen/RequestScheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py index 13ae70deb..57afe80be 100644 --- a/src/load_generator/load_gen/RequestScheduler.py +++ b/src/load_generator/load_gen/RequestScheduler.py @@ -49,7 +49,7 @@ class RequestScheduler: self._running = threading.Event() @property - def num_generated(self): return max(self._generator.num_generated, self._parameters.num_requests) + def num_generated(self): return min(self._generator.num_generated, self._parameters.num_requests) @property def infinite_loop(self): return self._generator.infinite_loop -- GitLab From 384f7e9caebc16aa11ed62a0ac1af0deb9f8848d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 16:35:34 +0000 Subject: [PATCH 35/43] Service component: - Reduced log level of TaskScheduler internal log messages --- src/service/service/task_scheduler/TaskScheduler.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py index f55527e47..fbc554aa2 100644 --- a/src/service/service/task_scheduler/TaskScheduler.py +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -130,7 +130,7 @@ class TasksScheduler: self._dag.add(connection_key, service_key_done) t1 = time.time() - LOGGER.info('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0)) + LOGGER.debug('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0)) def compose_from_service(self, service : Service, is_delete : bool = False) -> None: t0 = time.time() @@ -196,11 +196,11 @@ class TasksScheduler: raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item))) t1 = time.time() - LOGGER.info('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) + 
LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) def execute_all(self, dry_run : bool = False) -> None: ordered_task_keys = list(self._dag.static_order()) - LOGGER.info('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys))) + LOGGER.debug('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys))) results = [] for task_key in ordered_task_keys: @@ -208,5 +208,5 @@ class TasksScheduler: succeeded = True if dry_run else task.execute() results.append(succeeded) - LOGGER.info('[execute_all] results={:s}'.format(str(results))) + LOGGER.debug('[execute_all] results={:s}'.format(str(results))) return zip(ordered_task_keys, results) -- GitLab From 08e7992a1c569878f6513be2b7c2b551f4d0ee94 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 16:36:28 +0000 Subject: [PATCH 36/43] Context component: - Corrected method UnsetSlice to remove endpoints, constraints, and config rules --- src/context/service/database/ConfigRule.py | 8 +++--- src/context/service/database/Constraint.py | 30 ++++++++++++---------- src/context/service/database/Slice.py | 14 ++++++---- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py index dd60441ca..09723cc6f 100644 --- a/src/context/service/database/ConfigRule.py +++ b/src/context/service/database/ConfigRule.py @@ -80,7 +80,7 @@ def compose_config_rules_data( return dict_config_rules def upsert_config_rules( - session : Session, config_rules : List[Dict], + session : Session, config_rules : List[Dict], is_delete : bool = False, device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None, ) -> bool: uuids_to_delete : Set[str] = set() @@ -89,7 +89,9 @@ def upsert_config_rules( for config_rule in config_rules: configrule_uuid = config_rule['configrule_uuid'] configrule_action = config_rule['action'] - if configrule_action == ORM_ConfigActionEnum.SET: + if is_delete or configrule_action == ORM_ConfigActionEnum.DELETE: + uuids_to_delete.add(configrule_uuid) + elif configrule_action == ORM_ConfigActionEnum.SET: position = uuids_to_upsert.get(configrule_uuid) if position is None: # if not added, add it @@ -98,8 +100,6 @@ def upsert_config_rules( else: # if already added, update occurrence rules_to_upsert[position] = config_rule - elif configrule_action == ORM_ConfigActionEnum.DELETE: - uuids_to_delete.add(configrule_uuid) else: MSG = 'Action for ConfigRule({:s}) is not supported '+\ '(device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})' diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py index b37d0dcad..5ebe36f99 100644 --- a/src/context/service/database/Constraint.py +++ b/src/context/service/database/Constraint.py @@ -81,29 +81,31 @@ def compose_constraints_data( return dict_constraints def upsert_constraints( - session : Session, constraints : List[Dict], + session : Session, constraints : List[Dict], is_delete : bool = False, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None ) -> bool: uuids_to_upsert : Dict[str, int] = dict() rules_to_upsert : List[Dict] = list() - for constraint in constraints: - constraint_uuid = constraint['constraint_uuid'] - position = uuids_to_upsert.get(constraint_uuid) - if position is None: - # if not added, add it - rules_to_upsert.append(constraint) - uuids_to_upsert[constraint_uuid] = len(rules_to_upsert) - 1 - else: - # if already added, update occurrence - 
rules_to_upsert[position] = constraint + if not is_delete: + for constraint in constraints: + constraint_uuid = constraint['constraint_uuid'] + position = uuids_to_upsert.get(constraint_uuid) + if position is None: + # if not added, add it + rules_to_upsert.append(constraint) + uuids_to_upsert[constraint_uuid] = len(rules_to_upsert) - 1 + else: + # if already added, update occurrence + rules_to_upsert[position] = constraint # Delete all constraints not in uuids_to_upsert delete_affected = False - if len(uuids_to_upsert) > 0: + if is_delete or len(uuids_to_upsert) > 0: stmt = delete(ConstraintModel) if service_uuid is not None: stmt = stmt.where(ConstraintModel.service_uuid == service_uuid) if slice_uuid is not None: stmt = stmt.where(ConstraintModel.slice_uuid == slice_uuid ) - stmt = stmt.where(ConstraintModel.constraint_uuid.not_in(set(uuids_to_upsert.keys()))) + if not is_delete: + stmt = stmt.where(ConstraintModel.constraint_uuid.not_in(set(uuids_to_upsert.keys()))) #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}) #LOGGER.warning('delete stmt={:s}'.format(str(str_stmt))) constraint_deletes = session.execute(stmt) @@ -111,7 +113,7 @@ def upsert_constraints( delete_affected = int(constraint_deletes.rowcount) > 0 upsert_affected = False - if len(constraints) > 0: + if not is_delete and len(constraints) > 0: stmt = insert(ConstraintModel).values(constraints) stmt = stmt.on_conflict_do_update( index_elements=[ConstraintModel.constraint_uuid], diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index 80af759de..1d6781d53 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -178,10 +178,6 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=False) - if len(request.slice_constraints) > 0: raise NotImplementedError('UnsetSlice: removal of constraints') - if len(request.slice_config.config_rules) > 0: raise NotImplementedError('UnsetSlice: removal of config rules') - #if len(request.slice_endpoint_ids) > 0: raise NotImplementedError('UnsetSlice: removal of endpoints') - slice_endpoint_uuids : Set[str] = set() for i,endpoint_id in enumerate(request.slice_endpoint_ids): endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid @@ -203,6 +199,10 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: for subslice_id in request.slice_subslice_ids } + now = datetime.datetime.utcnow() + constraints = compose_constraints_data(request.slice_constraints, now, slice_uuid=slice_uuid) + config_rules = compose_config_rules_data(request.slice_config.config_rules, now, slice_uuid=slice_uuid) + def callback(session : Session) -> bool: num_deletes = 0 if len(slice_service_uuids) > 0: @@ -223,7 +223,11 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: SliceEndPointModel.slice_uuid == slice_uuid, SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids) )).delete() - return num_deletes > 0 + + changed_constraints = upsert_constraints(session, constraints, is_delete=True, slice_uuid=slice_uuid) + changed_config_rules = upsert_config_rules(session, config_rules, is_delete=True, slice_uuid=slice_uuid) + + return num_deletes > 0 or changed_constraints or changed_config_rules updated = run_transaction(sessionmaker(bind=db_engine), 
callback) return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated -- GitLab From 870329f2de0e0110e265a753ef5cb270c09333dd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 17:44:34 +0000 Subject: [PATCH 37/43] Context component: - Corrected method UnsetSlice to remove constraints --- src/context/service/database/Constraint.py | 26 ++++++++++------------ 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py index 5ebe36f99..3a73f6589 100644 --- a/src/context/service/database/Constraint.py +++ b/src/context/service/database/Constraint.py @@ -86,26 +86,24 @@ def upsert_constraints( ) -> bool: uuids_to_upsert : Dict[str, int] = dict() rules_to_upsert : List[Dict] = list() - if not is_delete: - for constraint in constraints: - constraint_uuid = constraint['constraint_uuid'] - position = uuids_to_upsert.get(constraint_uuid) - if position is None: - # if not added, add it - rules_to_upsert.append(constraint) - uuids_to_upsert[constraint_uuid] = len(rules_to_upsert) - 1 - else: - # if already added, update occurrence - rules_to_upsert[position] = constraint + for constraint in constraints: + constraint_uuid = constraint['constraint_uuid'] + position = uuids_to_upsert.get(constraint_uuid) + if position is None: + # if not added, add it + rules_to_upsert.append(constraint) + uuids_to_upsert[constraint_uuid] = len(rules_to_upsert) - 1 + else: + # if already added, update occurrence + rules_to_upsert[position] = constraint # Delete all constraints not in uuids_to_upsert delete_affected = False - if is_delete or len(uuids_to_upsert) > 0: + if len(uuids_to_upsert) > 0: stmt = delete(ConstraintModel) if service_uuid is not None: stmt = stmt.where(ConstraintModel.service_uuid == service_uuid) if slice_uuid is not None: stmt = stmt.where(ConstraintModel.slice_uuid == slice_uuid ) - if not is_delete: - stmt = stmt.where(ConstraintModel.constraint_uuid.not_in(set(uuids_to_upsert.keys()))) + stmt = stmt.where(ConstraintModel.constraint_uuid.not_in(set(uuids_to_upsert.keys()))) #str_stmt = stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}) #LOGGER.warning('delete stmt={:s}'.format(str(str_stmt))) constraint_deletes = session.execute(stmt) -- GitLab From dd6cc334f8b63898052a864dfa43861e7470848d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 17:46:51 +0000 Subject: [PATCH 38/43] Load Generator component: - Extracted constant MAX_WORKER_THREADS - Rounded SLA values to 2 decimal digits --- src/load_generator/load_gen/Constants.py | 2 ++ .../load_gen/RequestGenerator.py | 18 +++++++++--------- .../load_gen/RequestScheduler.py | 3 ++- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/load_generator/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py index b71dd9a35..9ae3cdc12 100644 --- a/src/load_generator/load_gen/Constants.py +++ b/src/load_generator/load_gen/Constants.py @@ -26,3 +26,5 @@ ENDPOINT_COMPATIBILITY = { 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT': 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT', 'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT' : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT', } + +MAX_WORKER_THREADS = 10 \ No newline at end of file diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index a6d14307e..0ada285bc 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -230,9 +230,9 @@ class 
RequestGenerator: ] if request_type == RequestType.SERVICE_L2NM: - availability = int(random.uniform(00.0, 99.99) * 100.0) / 100.0 - capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0 - e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0 + availability = round(random.uniform(0.0, 99.99), ndigits=2) + capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) + e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ json_constraint_sla_availability(1, True, availability), @@ -275,9 +275,9 @@ class RequestGenerator: request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) elif request_type == RequestType.SERVICE_L3NM: - availability = int(random.uniform(00.0, 99.99) * 100.0) / 100.0 - capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0 - e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0 + availability = round(random.uniform(0.0, 99.99), ndigits=2) + capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) + e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ json_constraint_sla_availability(1, True, availability), @@ -380,9 +380,9 @@ class RequestGenerator: json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid), ] - availability = int(random.uniform(00.0, 99.99) * 100.0) / 100.0 - capacity_gbps = int(random.uniform(0.1, 100.0) * 100.0) / 100.0 - e2e_latency_ms = int(random.uniform(5.0, 100.0) * 100.0) / 100.0 + availability = round(random.uniform(0.0, 99.99), ndigits=2) + capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) + e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ json_constraint_sla_availability(1, True, availability), json_constraint_sla_capacity(capacity_gbps), diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py index 57afe80be..773a37eac 100644 --- a/src/load_generator/load_gen/RequestScheduler.py +++ b/src/load_generator/load_gen/RequestScheduler.py @@ -21,6 +21,7 @@ from typing import Dict, Optional from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId from service.client.ServiceClient import ServiceClient from slice.client.SliceClient import SliceClient +from .Constants import MAX_WORKER_THREADS from .DltTools import explore_entities_to_record, record_entities from .Parameters import Parameters from .RequestGenerator import RequestGenerator @@ -37,7 +38,7 @@ class RequestScheduler: self._scheduler = scheduler_class() self._scheduler.configure( jobstores = {'default': MemoryJobStore()}, - executors = {'default': ThreadPoolExecutor(max_workers=10)}, + executors = {'default': ThreadPoolExecutor(max_workers=MAX_WORKER_THREADS)}, job_defaults = { 'coalesce': False, 'max_instances': 100, -- GitLab From 3091a5724c9f6f81d6445cb5706244d8a3df11a8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 17:47:06 +0000 Subject: [PATCH 39/43] WebUI component: - Rounded SLA values to 2 decimal digits --- src/webui/service/__init__.py | 1 + src/webui/service/templates/service/detail.html | 4 ++-- src/webui/service/templates/slice/detail.html | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index ef5253b87..fca107141 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -98,6 +98,7 @@ def create_app(use_config=None, web_app_root=None): app.jinja_env.globals.update({ # pylint: disable=no-member 
'enumerate' : enumerate, 'json_to_list' : json_to_list, + 'round' : round, 'get_working_context' : get_working_context, 'get_working_topology': get_working_topology, }) diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index b267f986c..1cc115a9b 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -141,7 +141,7 @@ SLA Capacity - - {{ constraint.sla_capacity.capacity_gbps }} Gbps + {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps {% elif constraint.WhichOneof('constraint')=='sla_latency' %} @@ -149,7 +149,7 @@ SLA E2E Latency - - {{ constraint.sla_latency.e2e_latency_ms }} ms + {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms {% elif constraint.WhichOneof('constraint')=='sla_availability' %} diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 2c1b55afb..f2adff751 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -141,7 +141,7 @@ SLA Capacity - - {{ constraint.sla_capacity.capacity_gbps }} Gbps + {{ round(constraint.sla_capacity.capacity_gbps, ndigits=2) }} Gbps {% elif constraint.WhichOneof('constraint')=='sla_latency' %} @@ -149,7 +149,7 @@ SLA E2E Latency - - {{ constraint.sla_latency.e2e_latency_ms }} ms + {{ round(constraint.sla_latency.e2e_latency_ms, ndigits=2) }} ms {% elif constraint.WhichOneof('constraint')=='sla_availability' %} -- GitLab From 27aa1a2afb92a76a9bc629ffbf508eb26c58dd52 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:40:11 +0000 Subject: [PATCH 40/43] Common - Tools - Object Factory: - Removed availability percentage from SLA Availability constraint (new feature) --- src/common/tools/object_factory/Constraint.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/common/tools/object_factory/Constraint.py b/src/common/tools/object_factory/Constraint.py index ef00e3872..0df049ab7 100644 --- a/src/common/tools/object_factory/Constraint.py +++ b/src/common/tools/object_factory/Constraint.py @@ -29,9 +29,9 @@ def json_constraint_endpoint_location_gps(endpoint_id : Dict, latitude : float, def json_constraint_endpoint_priority(endpoint_id : Dict, priority : int) -> Dict: return {'endpoint_priority': {'endpoint_id': endpoint_id, 'priority': priority}} -def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool, availability : float) -> Dict: +def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool) -> Dict: return {'sla_availability': { - 'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active, 'availability': availability + 'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active }} def json_constraint_sla_capacity(capacity_gbps : float) -> Dict: -- GitLab From 53ea4960553dde9b9957b030bc01a84cdbfdf50b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:40:34 +0000 Subject: [PATCH 41/43] Load Generator component: - Removed availability percentage from SLA Availability constraint (new feature) --- src/load_generator/load_gen/RequestGenerator.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index 0ada285bc..e7988760b 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -230,12 +230,11 
@@ class RequestGenerator: ] if request_type == RequestType.SERVICE_L2NM: - availability = round(random.uniform(0.0, 99.99), ndigits=2) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ - json_constraint_sla_availability(1, True, availability), + json_constraint_sla_availability(1, True), json_constraint_sla_capacity(capacity_gbps), json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), json_constraint_sla_latency(e2e_latency_ms), @@ -275,12 +274,11 @@ class RequestGenerator: request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) elif request_type == RequestType.SERVICE_L3NM: - availability = round(random.uniform(0.0, 99.99), ndigits=2) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ - json_constraint_sla_availability(1, True, availability), + json_constraint_sla_availability(1, True), json_constraint_sla_capacity(capacity_gbps), json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), json_constraint_sla_latency(e2e_latency_ms), @@ -380,11 +378,10 @@ class RequestGenerator: json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid), ] - availability = round(random.uniform(0.0, 99.99), ndigits=2) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ - json_constraint_sla_availability(1, True, availability), + json_constraint_sla_availability(1, True), json_constraint_sla_capacity(capacity_gbps), json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), json_constraint_sla_latency(e2e_latency_ms), -- GitLab From 97f4422f3445c572233ba9ebe8e2e20007986402 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:40:52 +0000 Subject: [PATCH 42/43] Path Computation component - Frontend: - Removed availability percentage from SLA Availability constraint (new feature) --- src/pathcomp/frontend/tests/test_unitary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py index 8088259b8..5d642cf4c 100644 --- a/src/pathcomp/frontend/tests/test_unitary.py +++ b/src/pathcomp/frontend/tests/test_unitary.py @@ -203,7 +203,7 @@ def test_request_service_kdisjointpath( endpoint_ids, constraints = [], [ json_constraint_sla_capacity(10.0), json_constraint_sla_latency(12.0), - json_constraint_sla_availability(2, True, 50.0), + json_constraint_sla_availability(2, True), json_constraint_custom('diversity', {'end-to-end-diverse': 'all-other-accesses'}), ] -- GitLab From 3c6645073de16c768035437166eb522d4ed3e104 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:50:16 +0000 Subject: [PATCH 43/43] Common - Tools - gRPC Helpers: - Removed availability percentage from SLA Availability constraint (new feature) --- src/common/tools/grpc/Constraints.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/common/tools/grpc/Constraints.py b/src/common/tools/grpc/Constraints.py index 07f0b7782..f33e7b1ef 100644 --- a/src/common/tools/grpc/Constraints.py +++ b/src/common/tools/grpc/Constraints.py @@ -160,7 +160,7 @@ def update_constraint_sla_latency(constraints, e2e_latency_ms : float) -> Constr return constraint def update_constraint_sla_availability( - constraints, num_disjoint_paths : int, all_active : bool, availability : float + 
constraints, num_disjoint_paths : int, all_active : bool ) -> Constraint: for constraint in constraints: if constraint.WhichOneof('constraint') != 'sla_availability': continue @@ -171,7 +171,6 @@ def update_constraint_sla_availability( constraint.sla_availability.num_disjoint_paths = num_disjoint_paths constraint.sla_availability.all_active = all_active - constraint.sla_availability.availability = availability return constraint def update_constraint_sla_isolation(constraints, isolation_levels : List[int]) -> Constraint: @@ -240,8 +239,7 @@ def copy_constraints(source_constraints, target_constraints): sla_availability = source_constraint.sla_availability num_disjoint_paths = sla_availability.num_disjoint_paths all_active = sla_availability.all_active - availability = sla_availability.availability - update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active, availability) + update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active) elif constraint_kind == 'sla_isolation': sla_isolation = source_constraint.sla_isolation -- GitLab
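The SLA constraint shape that the descriptor and test updates above converge on can be reproduced standalone. A minimal sketch follows, assuming plain-dict JSON handling: the real factories live in src/common/tools/object_factory/Constraint.py, and only json_constraint_sla_availability appears verbatim in these patches (with its final signature, i.e. without the availability percentage); the bodies of the other two are inferred from the descriptor JSON shown in the diffs.

from typing import Dict

def json_constraint_sla_capacity(capacity_gbps : float) -> Dict:
    # Shape used in the updated descriptors: {"sla_capacity": {"capacity_gbps": 10.0}}
    return {'sla_capacity': {'capacity_gbps': capacity_gbps}}

def json_constraint_sla_latency(e2e_latency_ms : float) -> Dict:
    # Shape used in the updated descriptors: {"sla_latency": {"e2e_latency_ms": 15.2}}
    return {'sla_latency': {'e2e_latency_ms': e2e_latency_ms}}

def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool) -> Dict:
    # Final signature after the availability-percentage removal.
    return {'sla_availability': {'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active}}

# Composing a service_constraints list as in service-l3vpn.json:
service_constraints = [
    json_constraint_sla_capacity(10.0),
    json_constraint_sla_latency(15.2),
    json_constraint_sla_availability(1, True),
]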
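The corrected algorithm inference in ServiceServiceServicerImpl.UpdateService can also be isolated. The sketch below operates on plain dicts rather than the Service/Constraint protobufs, so the accessors are illustrative; the branching mirrors the patched logic: default to a single path (shortest-path algorithm) and require num_disjoint_paths * 2 endpoints before invoking path computation.

from typing import Dict, List, Tuple

def infer_pathcomp_settings(service_constraints : List[Dict]) -> Tuple[str, int]:
    # Take num_disjoint_paths from the sla_availability constraint, normalize
    # None/0 to 1, and derive how many endpoints the service must carry before
    # path computation is triggered.
    num_disjoint_paths = 0
    for constraint in service_constraints:
        sla_availability = constraint.get('sla_availability')
        if sla_availability is None: continue
        num_disjoint_paths = sla_availability.get('num_disjoint_paths', 0)
        break
    num_disjoint_paths = 1 if num_disjoint_paths is None or num_disjoint_paths == 0 else num_disjoint_paths
    algorithm = 'shortest_path' if num_disjoint_paths == 1 else 'k_disjoint_path'
    return algorithm, num_disjoint_paths * 2

# infer_pathcomp_settings([{'sla_availability': {'num_disjoint_paths': 2, 'all_active': True}}])
# -> ('k_disjoint_path', 4)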
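Finally, the termination handshake introduced between RequestGenerator and RequestScheduler reduces to the following toy model. It strips out APScheduler, gRPC and the endpoint bookkeeping and keeps only the control flow these patches establish: compose_request returns a (completed, request) tuple, and the scheduler stops rescheduling and clears its running event once the generator reports completion. Class and member names mirror the patched code, but this is a simplification, not the real implementation.

import threading
from typing import Dict, Optional, Tuple

class RequestGenerator:
    def __init__(self, num_requests : int) -> None:
        self._num_requests  = num_requests   # 0 means run forever
        self._num_generated = 0

    @property
    def infinite_loop(self): return self._num_requests == 0

    def compose_request(self) -> Tuple[bool, Optional[Dict]]:  # (completed, request)
        if not self.infinite_loop and self._num_generated >= self._num_requests:
            return True, None                # generation done; no request produced
        self._num_generated += 1
        return False, {'request_uuid': 'svc_{:d}'.format(self._num_generated)}

class RequestScheduler:
    def __init__(self, generator : RequestGenerator) -> None:
        self._generator = generator
        self._running   = threading.Event()

    def start(self) -> None:
        self._running.set()
        while self._running.is_set():
            completed, request = self._generator.compose_request()
            if completed:
                self._running.clear()        # stop rescheduling request setups
                break
            # ... the real scheduler enqueues setup/teardown jobs here ...

scheduler = RequestScheduler(RequestGenerator(num_requests=3))
scheduler.start()   # generates svc_1..svc_3, then stops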