From 223e1cd2a4d88cadb4a241c65bc32c84815ef63e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Feb 2023 13:53:30 +0000 Subject: [PATCH 01/34] Slice component: - initial code blocks and details for slice grouping --- src/slice/requirements.in | 3 +- src/slice/service/README.md | 38 +++++++++++++++++++ src/slice/service/SliceGrouper.py | 61 +++++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 src/slice/service/README.md create mode 100644 src/slice/service/SliceGrouper.py diff --git a/src/slice/requirements.in b/src/slice/requirements.in index daef740da..42a96f5a5 100644 --- a/src/slice/requirements.in +++ b/src/slice/requirements.in @@ -12,5 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - #deepdiff==5.8.* +numpy==1.23.* +scikit-learn==1.1.* diff --git a/src/slice/service/README.md b/src/slice/service/README.md new file mode 100644 index 000000000..696b4a6e0 --- /dev/null +++ b/src/slice/service/README.md @@ -0,0 +1,38 @@ +# SLICE GROUPING details + +## Description +- Similar slice requests can share underlying services. +- Clustering algorithm for slice grouping. +- Consider both paths and SLA constraints. +- SLA monitored by slice group. + +## TFS Target Objective +- Objective 3.2: Provisioning of multi-tenant transport network slices. +- Improve network resource usage by 30% by adopting multi-tenancy resource allocation algorithms. +- Optimal slice grouping: trade-offs between economies of scale and limitations as to which SLAs can be grouped together need to be considered. +- Optimal grouping of slices is required to maximise KPIs, such as resource utilisation, utility of the connectivity, and energy efficiency. +- In this context, trade-offs between the resulting control plane complexity and differential treatment of SLA classes should be considered. + +## New Requirements +- User can select if slice grouping is performed per-slice request. 
+- Slice grouping introduces a clustering algorithm for finding service optimisation while preserving slice SLA. +- Service (re-)optimisation is provided. + +## TFS Architecture Update +- Update Slice service RPC to include Slice Grouping. +- Use novel Slice model with SLA constraints. +- Use Policy Component with action to update services to apply slice grouping. +- Describe Slice service operation modes: per-request or user-triggered. + + OSS/BSS --> Slice : Create Slice with SLA (slice) + Slice --> Slice : Slice Grouping (slice) +alt [slice can be grouped to other slice services] + // do nothing and return existing slice +else [slice needs new services] + Slice --> ... : normal logic +end alt + Slice --> OSS/BSS : slice + +slice.proto: + rpc OrderSliceWithSLA(context.Slice) returns (context.SliceId) {} // If slice with SLA already exists, returns slice. If not, it creates it. + rpc RunSliceGrouping (context.Empty) returns (context.Empty) {} // Optimizes the underlying services and re-maps them to the requested slices. diff --git a/src/slice/service/SliceGrouper.py b/src/slice/service/SliceGrouper.py new file mode 100644 index 000000000..e5363de1b --- /dev/null +++ b/src/slice/service/SliceGrouper.py @@ -0,0 +1,61 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#import numpy as np +#import pandas as pd +from matplotlib import pyplot as plt +from sklearn.datasets import make_blobs +from sklearn.cluster import KMeans +from common.proto.context_pb2 import ContextId +from context.client.ContextClient import ContextClient + +class SliceGrouper: + def __init__(self) -> None: + pass + + def load_slices(self, context_uuid : str) -> None: + context_client = ContextClient() + + + context_client.ListSlices(ContextId) + +X, y = make_blobs(n_samples=300, n_features=2, cluster_std=[(10,.1),(100,.01)],centers= [(10,.9), (100,.99)]) + +plt.scatter(X[:,0], X[:,1]) +plt.show() + + +wcss = [] +for i in range(1, 11): + kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0) + kmeans.fit(X) + wcss.append(kmeans.inertia_) +plt.plot(range(1, 11), wcss) +plt.title('Elbow Method') +plt.xlabel('Number of clusters') +plt.ylabel('WCSS') +plt.show() + + +kmeans = KMeans(n_clusters=2, init='k-means++', max_iter=300, n_init=10, random_state=0) +pred_y = kmeans.fit_predict(X) +plt.scatter(X[:,0], X[:,1]) +plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red') +plt.ylabel('service-slo-availability') +plt.xlabel('service-slo-one-way-bandwidth') +ax = plt.subplot(1, 1, 1) + +ax.set_ylim(bottom=0., top=1.) +ax.set_xlim(left=0.) 
+plt.show() -- GitLab From e8de1c11965351579995b1f00977c2dec4032fd1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 10 Feb 2023 10:38:46 +0000 Subject: [PATCH 02/34] Slice component: - Updated SliceClient class with missing methods --- src/slice/client/SliceClient.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/slice/client/SliceClient.py b/src/slice/client/SliceClient.py index a3e5d6490..792a2037f 100644 --- a/src/slice/client/SliceClient.py +++ b/src/slice/client/SliceClient.py @@ -65,3 +65,17 @@ class SliceClient: response = self.stub.DeleteSlice(request) LOGGER.debug('DeleteSlice result: {:s}'.format(grpc_message_to_json_string(response))) return response + + @RETRY_DECORATOR + def OrderSliceWithSLA(self, request : Slice) -> SliceId: + LOGGER.debug('OrderSliceWithSLA request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.OrderSliceWithSLA(request) + LOGGER.debug('OrderSliceWithSLA result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RunSliceGrouping(self, request : Empty) -> Empty: + LOGGER.debug('RunSliceGrouping request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RunSliceGrouping(request) + LOGGER.debug('RunSliceGrouping result: {:s}'.format(grpc_message_to_json_string(response))) + return response -- GitLab From ffa4efdd71b8515fccc34dedb984197211b837d5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:19:55 +0000 Subject: [PATCH 03/34] Proto: - extended Constraint_SLA_Availability with field availability --- proto/context.proto | 1 + 1 file changed, 1 insertion(+) diff --git a/proto/context.proto b/proto/context.proto index e403c4a22..49d16229c 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -509,6 +509,7 @@ message Constraint_SLA_Capacity { message Constraint_SLA_Availability { uint32 num_disjoint_paths = 1; bool all_active = 2; + float availability = 3; // 0.0 .. 
100.0 percentage of availability } enum IsolationLevelEnum { -- GitLab From 643ae39a06a21b5a74a3003e34a017e3478a8d7b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:20:37 +0000 Subject: [PATCH 04/34] WebUI component: - extended Constraint_SLA_Availability with field availability --- src/webui/service/templates/service/detail.html | 1 + src/webui/service/templates/slice/detail.html | 1 + 2 files changed, 2 insertions(+) diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index b267f986c..d99ede3e0 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -157,6 +157,7 @@ SLA Availability - + {{ constraint.sla_availability.availability }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 2c1b55afb..6c8d15aed 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -157,6 +157,7 @@ SLA Availability - + {{ constraint.sla_availability.availability }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active -- GitLab From d78a15f1da26c88c55daf715bca6db9efb5579cf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:21:30 +0000 Subject: [PATCH 05/34] Proto: - implemented enhanced methods to control load generator form WebUI --- proto/load_generator.proto | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/proto/load_generator.proto b/proto/load_generator.proto index 98f6eefda..86f946958 100644 --- a/proto/load_generator.proto +++ b/proto/load_generator.proto @@ -18,6 +18,36 @@ package load_generator; 
import "context.proto"; service LoadGeneratorService { - rpc Start(context.Empty) returns (context.Empty) {} - rpc Stop (context.Empty) returns (context.Empty) {} + rpc Start (Parameters ) returns (context.Empty) {} + rpc GetStatus(context.Empty) returns (Status ) {} + rpc Stop (context.Empty) returns (context.Empty) {} +} + +enum RequestTypeEnum { + REQUESTTYPE_UNDEFINED = 0; + REQUESTTYPE_SERVICE_L2NM = 1; + REQUESTTYPE_SERVICE_L3NM = 2; + REQUESTTYPE_SERVICE_MW = 3; + REQUESTTYPE_SERVICE_TAPI = 4; + REQUESTTYPE_SLICE_L2NM = 5; + REQUESTTYPE_SLICE_L3NM = 6; +} + +message Parameters { + uint64 num_requests = 1; // if == 0, generate infinite requests + repeated RequestTypeEnum request_types = 2; + float offered_load = 3; + float holding_time = 4; + float inter_arrival_time = 5; + bool do_teardown = 6; + bool dry_mode = 7; + bool record_to_dlt = 8; + string dlt_domain_id = 9; +} + +message Status { + Parameters parameters = 1; + uint64 num_generated = 2; + bool infinite_loop = 3; + bool running = 4; } -- GitLab From 73efb256970cf507b9ca09763bdd292beebd1e7b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:23:28 +0000 Subject: [PATCH 06/34] Load Generator component: - Updated client class methods according to proto - RequestScheduler: added methods to report status - Extended servicer class to support parametrization --- .../client/LoadGeneratorClient.py | 10 ++- .../load_gen/RequestScheduler.py | 19 ++++-- src/load_generator/service/Constants.py | 27 ++++++++ .../LoadGeneratorServiceServicerImpl.py | 65 +++++++++++++------ 4 files changed, 95 insertions(+), 26 deletions(-) create mode 100644 src/load_generator/service/Constants.py diff --git a/src/load_generator/client/LoadGeneratorClient.py b/src/load_generator/client/LoadGeneratorClient.py index 99626bbbb..2bed40dfd 100644 --- a/src/load_generator/client/LoadGeneratorClient.py +++ b/src/load_generator/client/LoadGeneratorClient.py @@ -16,6 +16,7 @@ import grpc, logging from common.Constants 
import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc from common.proto.context_pb2 import Empty +from common.proto.load_generator_pb2 import Parameters, Status from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string @@ -46,12 +47,19 @@ class LoadGeneratorClient: self.stub = None @RETRY_DECORATOR - def Start(self, request : Empty) -> Empty: + def Start(self, request : Parameters) -> Empty: LOGGER.debug('Start request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.Start(request) LOGGER.debug('Start result: {:s}'.format(grpc_message_to_json_string(response))) return response + @RETRY_DECORATOR + def GetStatus(self, request : Empty) -> Status: + LOGGER.debug('GetStatus request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.GetStatus(request) + LOGGER.debug('GetStatus result: {:s}'.format(grpc_message_to_json_string(response))) + return response + @RETRY_DECORATOR def Stop(self, request : Empty) -> Empty: LOGGER.debug('Stop request: {:s}'.format(grpc_message_to_json_string(request))) diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py index 775da1580..e1003376a 100644 --- a/src/load_generator/load_gen/RequestScheduler.py +++ b/src/load_generator/load_gen/RequestScheduler.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy, logging, pytz, random +import copy, logging, pytz, random, threading from apscheduler.executors.pool import ThreadPoolExecutor from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.schedulers.blocking import BlockingScheduler @@ -46,13 +46,22 @@ class RequestScheduler: timezone=pytz.utc) self._parameters = parameters self._generator = generator + self._running = threading.Event() + + @property + def num_generated(self): return max(0, self._generator.num_requests_generated - 1) # first increases, then checks + + @property + def infinite_loop(self): return self._parameters.num_requests == 0 + + @property + def running(self): return self._running.is_set() def _schedule_request_setup(self) -> None: - infinite_loop = self._parameters.num_requests == 0 - num_requests_generated = self._generator.num_requests_generated - 1 # because it first increases, then checks - if not infinite_loop and (num_requests_generated >= self._parameters.num_requests): + if not self.infinite_loop and (self.num_generated >= self._parameters.num_requests): LOGGER.info('Generation Done!') #self._scheduler.shutdown() + self._running.clear() return iat = random.expovariate(1.0 / self._parameters.inter_arrival_time) run_date = datetime.utcnow() + timedelta(seconds=iat) @@ -66,11 +75,13 @@ class RequestScheduler: self._request_teardown, args=(request,), trigger='date', run_date=run_date, timezone=pytz.utc) def start(self): + self._running.set() self._schedule_request_setup() self._scheduler.start() def stop(self): self._scheduler.shutdown() + self._running.clear() def _request_setup(self) -> None: self._schedule_request_setup() diff --git a/src/load_generator/service/Constants.py b/src/load_generator/service/Constants.py new file mode 100644 index 000000000..6c339877c --- /dev/null +++ b/src/load_generator/service/Constants.py @@ -0,0 +1,27 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.load_generator_pb2 import RequestTypeEnum +from load_generator.load_gen.Constants import RequestType + +REQUEST_TYPE_MAP = { + RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM : RequestType.SERVICE_L2NM, + RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM : RequestType.SERVICE_L3NM, + RequestTypeEnum.REQUESTTYPE_SERVICE_MW : RequestType.SERVICE_MW, + RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI : RequestType.SERVICE_TAPI, + RequestTypeEnum.REQUESTTYPE_SLICE_L2NM : RequestType.SLICE_L2NM, + RequestTypeEnum.REQUESTTYPE_SLICE_L3NM : RequestType.SLICE_L3NM, +} + +REQUEST_TYPE_REVERSE_MAP = {v:k for k,v in REQUEST_TYPE_MAP.items()} diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py index c280581dd..d66b0b2c1 100644 --- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py +++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py @@ -12,43 +12,39 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional import grpc, logging +from typing import Optional from apscheduler.schedulers.background import BackgroundScheduler from common.proto.context_pb2 import Empty +from common.proto.load_generator_pb2 import Parameters, Status from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer -from load_generator.load_gen.Constants import RequestType -from load_generator.load_gen.Parameters import Parameters +from load_generator.load_gen.Parameters import Parameters as LoadGen_Parameters from load_generator.load_gen.RequestGenerator import RequestGenerator from load_generator.load_gen.RequestScheduler import RequestScheduler +from .Constants import REQUEST_TYPE_MAP, REQUEST_TYPE_REVERSE_MAP LOGGER = logging.getLogger(__name__) class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): def __init__(self): LOGGER.debug('Creating Servicer...') - self._parameters = Parameters( - num_requests = 100, - request_types = [ - RequestType.SERVICE_L2NM, - RequestType.SERVICE_L3NM, - #RequestType.SERVICE_MW, - #RequestType.SERVICE_TAPI, - RequestType.SLICE_L2NM, - RequestType.SLICE_L3NM, - ], - offered_load = 50, - holding_time = 10, - do_teardown = True, - dry_mode = False, # in dry mode, no request is sent to TeraFlowSDN - record_to_dlt = False, # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT - dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False - ) self._generator : Optional[RequestGenerator] = None self._scheduler : Optional[RequestScheduler] = None LOGGER.debug('Servicer Created') - def Start(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def Start(self, request : Parameters, context : grpc.ServicerContext) -> Empty: + self._parameters = LoadGen_Parameters( + num_requests = request.num_requests, + request_types = [REQUEST_TYPE_MAP[rt] for rt in request.request_types], + offered_load = request.offered_load if 
request.offered_load > 1.e-12 else None, + holding_time = request.holding_time if request.holding_time > 1.e-12 else None, + inter_arrival_time = request.inter_arrival_time if request.inter_arrival_time > 1.e-12 else None, + do_teardown = request.do_teardown, # if set, schedule tear down of requests + dry_mode = request.dry_mode, # in dry mode, no request is sent to TeraFlowSDN + record_to_dlt = request.record_to_dlt, # if set, upload changes to DLT + dlt_domain_id = request.dlt_domain_id, # domain used to uploaded entities (when record_to_dlt = True) + ) + LOGGER.info('Initializing Generator...') self._generator = RequestGenerator(self._parameters) self._generator.initialize() @@ -58,6 +54,33 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): self._scheduler.start() return Empty() + def GetStatus(self, request : Empty, context : grpc.ServicerContext) -> Status: + if self._scheduler is None: + # not started + status = Status() + status.num_generated = 0 + status.infinite_loop = False + status.running = False + return status + + params = self._scheduler._parameters + request_types = [REQUEST_TYPE_REVERSE_MAP[rt] for rt in params.request_types] + + status = Status() + status.num_generated = self._scheduler.num_generated + status.infinite_loop = self._scheduler.infinite_loop + status.running = self._scheduler.running + status.parameters.num_requests = params.num_requests # pylint: disable=no-member + status.parameters.offered_load = params.offered_load # pylint: disable=no-member + status.parameters.holding_time = params.holding_time # pylint: disable=no-member + status.parameters.inter_arrival_time = params.inter_arrival_time # pylint: disable=no-member + status.parameters.do_teardown = params.do_teardown # pylint: disable=no-member + status.parameters.dry_mode = params.dry_mode # pylint: disable=no-member + status.parameters.record_to_dlt = params.record_to_dlt # pylint: disable=no-member + status.parameters.dlt_domain_id = params.dlt_domain_id 
# pylint: disable=no-member + status.parameters.request_types.extend(request_types) # pylint: disable=no-member + return status + def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty: if self._scheduler is not None: self._scheduler.stop() -- GitLab From b6fdf507b6828cb2f3eced035cb5a7633fd9e0ec Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:25:14 +0000 Subject: [PATCH 07/34] WebUI component: - added section to control load generator - updated basic Debug API endpoints --- src/webui/service/load_gen/forms.py | 45 +++++ src/webui/service/load_gen/routes.py | 109 +++++++++--- src/webui/service/templates/base.html | 8 +- .../service/templates/load_gen/home.html | 164 ++++++++++++++++++ src/webui/service/templates/main/debug.html | 26 +-- 5 files changed, 307 insertions(+), 45 deletions(-) create mode 100644 src/webui/service/load_gen/forms.py create mode 100644 src/webui/service/templates/load_gen/home.html diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py new file mode 100644 index 000000000..3144fbe5d --- /dev/null +++ b/src/webui/service/load_gen/forms.py @@ -0,0 +1,45 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from flask_wtf import FlaskForm +from wtforms import BooleanField, FloatField, IntegerField, StringField, SubmitField +from wtforms.validators import DataRequired, NumberRange + +class LoadGenStartForm(FlaskForm): + num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)]) + num_generated = IntegerField('Num Generated', render_kw={'readonly': True}) + + request_type_service_l2nm = BooleanField('Service L2NM', default=False) + request_type_service_l3nm = BooleanField('Service L3NM', default=False) + request_type_service_mw = BooleanField('Service MW', default=False) + request_type_service_tapi = BooleanField('Service TAPI', default=False) + request_type_slice_l2nm = BooleanField('Slice L2NM', default=True) + request_type_slice_l3nm = BooleanField('Slice L3NM', default=False) + + offered_load = FloatField('Offered Load [Erlang]', default=50, validators=[NumberRange(min=1.e-12)]) + holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=1.e-12)]) + inter_arrival_time = FloatField('Inter Arrival Time[seconds]', default=0, validators=[NumberRange(min=1.e-12)]) + + do_teardown = BooleanField('Do Teardown', default=True) + + record_to_dlt = BooleanField('Record to DLT', default=False) + dlt_domain_id = StringField('DLT Domain Id', default='') + + infinite_loop = BooleanField('Infinite Loop', render_kw={'readonly': True}) + running = BooleanField('Running', render_kw={'readonly': True}) + + submit = SubmitField('Start') + +class LoadGenStopForm(FlaskForm): + submit = SubmitField('Stop') diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py index 3118b6de0..893900908 100644 --- a/src/webui/service/load_gen/routes.py +++ b/src/webui/service/load_gen/routes.py @@ -14,32 +14,93 @@ from flask import render_template, Blueprint, flash from common.proto.context_pb2 import Empty +from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum from 
load_generator.client.LoadGeneratorClient import LoadGeneratorClient +from .forms import LoadGenStartForm, LoadGenStopForm load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen') -@load_gen.route('start', methods=['GET']) -def start(): +@load_gen.route('home', methods=['GET', 'POST']) +def home(): load_gen_client = LoadGeneratorClient() - try: - load_gen_client.connect() - load_gen_client.Start(Empty()) - load_gen_client.close() - flash('Load Generator Started.', 'success') - except Exception as e: # pylint: disable=broad-except - flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger') - - return render_template('main/debug.html') - -@load_gen.route('stop', methods=['GET']) -def stop(): - load_gen_client = LoadGeneratorClient() - try: - load_gen_client.connect() - load_gen_client.Stop(Empty()) - load_gen_client.close() - flash('Load Generator Stoped.', 'success') - except Exception as e: # pylint: disable=broad-except - flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger') - - return render_template('main/debug.html') + + form_start = LoadGenStartForm() + form_stop = LoadGenStopForm() + + if form_start.validate_on_submit(): + load_gen_params = Parameters() + load_gen_params.num_requests = form_start.num_requests.data + load_gen_params.offered_load = form_start.offered_load.data + load_gen_params.holding_time = form_start.holding_time.data + load_gen_params.inter_arrival_time = form_start.inter_arrival_time.data + load_gen_params.do_teardown = form_start.do_teardown.data + load_gen_params.dry_mode = False + load_gen_params.record_to_dlt = form_start.record_to_dlt.data + load_gen_params.dlt_domain_id = form_start.dlt_domain_id.data + + del load_gen_params.request_types[:] # pylint: disable=no-member + request_types = list() + if form_start.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM) + if form_start.request_type_service_l3nm.data: 
request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM) + if form_start.request_type_service_mw .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_MW ) + if form_start.request_type_service_tapi.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI) + if form_start.request_type_slice_l2nm .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L2NM ) + if form_start.request_type_slice_l3nm .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L3NM ) + load_gen_params.request_types.extend(request_types) # pylint: disable=no-member + + try: + load_gen_client.connect() + load_gen_client.Start(load_gen_params) + load_gen_client.close() + flash('Load Generator Started.', 'success') + except Exception as e: # pylint: disable=broad-except + flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger') + + if form_stop.validate_on_submit(): + try: + load_gen_client.connect() + load_gen_client.Stop(Empty()) + load_gen_client.close() + flash('Load Generator Stoped.', 'success') + except Exception as e: # pylint: disable=broad-except + flash('Problem stopping Load Generator. 
{:s}'.format(str(e)), 'danger') + + load_gen_client.connect() + status = load_gen_client.GetStatus(Empty()) + load_gen_client.close() + + form_start.num_requests .default = status.parameters.num_requests + form_start.offered_load .default = status.parameters.offered_load + form_start.holding_time .default = status.parameters.holding_time + form_start.inter_arrival_time.default = status.parameters.inter_arrival_time + form_start.do_teardown .default = status.parameters.do_teardown + form_start.record_to_dlt .default = status.parameters.record_to_dlt + form_start.dlt_domain_id .default = status.parameters.dlt_domain_id + form_start.num_generated .default = status.num_generated + form_start.infinite_loop .default = status.infinite_loop + form_start.running .default = status.running + + request_types = status.parameters.request_types + form_start.request_type_service_l2nm.default = RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM in request_types + form_start.request_type_service_l3nm.default = RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM in request_types + form_start.request_type_service_mw .default = RequestTypeEnum.REQUESTTYPE_SERVICE_MW in request_types + form_start.request_type_service_tapi.default = RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI in request_types + form_start.request_type_slice_l2nm .default = RequestTypeEnum.REQUESTTYPE_SLICE_L2NM in request_types + form_start.request_type_slice_l3nm .default = RequestTypeEnum.REQUESTTYPE_SLICE_L3NM in request_types + + form_start.num_requests .render_kw['readonly'] = status.running + form_start.offered_load .render_kw['readonly'] = status.running + form_start.holding_time .render_kw['readonly'] = status.running + form_start.inter_arrival_time.render_kw['readonly'] = status.running + form_start.do_teardown .render_kw['readonly'] = status.running + form_start.record_to_dlt .render_kw['readonly'] = status.running + form_start.dlt_domain_id .render_kw['readonly'] = status.running + + 
form_start.request_type_service_l2nm.render_kw['readonly'] = status.running + form_start.request_type_service_l3nm.render_kw['readonly'] = status.running + form_start.request_type_service_mw .render_kw['readonly'] = status.running + form_start.request_type_service_tapi.render_kw['readonly'] = status.running + form_start.request_type_slice_l2nm .render_kw['readonly'] = status.running + form_start.request_type_slice_l3nm .render_kw['readonly'] = status.running + + return render_template('load_gen/home.html', form_start=form_start, form_stop=form_stop) diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html index 35999ebe1..1dfa36871 100644 --- a/src/webui/service/templates/base.html +++ b/src/webui/service/templates/base.html @@ -86,10 +86,16 @@ - + + +{% extends 'base.html' %} + +{% block content %} +

Load Generator

+
+ +
+ {{ form_start.hidden_tag() }} +
+
+ {{ form_start.num_requests.label(class="col-sm-2 col-form-label") }} +
+ {% if form_start.num_requests.errors %} + {{ form_start.num_requests(class="form-control is-invalid") }} +
+ {% for error in form_start.num_requests.errors %}{{ error }}{% endfor %} +
+ {% else %} + {{ form_start.num_requests(class="form-control") }} + {% endif %} +
+
+
+ +
+ {{ form_start.num_generated.label(class="col-sm-2 col-form-label") }} +
+ {% if form_start.num_generated.errors %} + {{ form_start.num_generated(class="form-control is-invalid") }} +
+ {% for error in form_start.num_generated.errors %}{{ error }}{% endfor %} +
+ {% else %} + {{ form_start.num_generated(class="form-control") }} + {% endif %} +
+
+
+ +
+
Service Types:
+
+ {{ form_start.request_type_service_l2nm }} {{ form_start.request_type_service_l2nm.label(class="col-sm-3 col-form-label") }} + {{ form_start.request_type_service_l3nm }} {{ form_start.request_type_service_l3nm.label(class="col-sm-3 col-form-label") }} + {{ form_start.request_type_service_mw }} {{ form_start.request_type_service_mw .label(class="col-sm-3 col-form-label") }} + {{ form_start.request_type_service_tapi }} {{ form_start.request_type_service_tapi.label(class="col-sm-3 col-form-label") }} + {{ form_start.request_type_slice_l2nm }} {{ form_start.request_type_slice_l2nm .label(class="col-sm-3 col-form-label") }} + {{ form_start.request_type_slice_l3nm }} {{ form_start.request_type_slice_l3nm .label(class="col-sm-3 col-form-label") }} +
+
+
+ +
+ {{ form_start.offered_load.label(class="col-sm-2 col-form-label") }} +
+ {% if form_start.offered_load.errors %} + {{ form_start.offered_load(class="form-control is-invalid") }} +
+ {% for error in form_start.offered_load.errors %}{{ error }}{% endfor %} +
+ {% else %} + {{ form_start.offered_load(class="form-control") }} + {% endif %} +
+
+
+ +
+ {{ form_start.holding_time.label(class="col-sm-2 col-form-label") }} +
+ {% if form_start.holding_time.errors %} + {{ form_start.holding_time(class="form-control is-invalid") }} +
+ {% for error in form_start.holding_time.errors %}{{ error }}{% endfor %} +
+ {% else %} + {{ form_start.holding_time(class="form-control") }} + {% endif %} +
+
+
+ +
+ {{ form_start.inter_arrival_time.label(class="col-sm-2 col-form-label") }} +
+ {% if form_start.inter_arrival_time.errors %} + {{ form_start.inter_arrival_time(class="form-control is-invalid") }} +
+ {% for error in form_start.inter_arrival_time.errors %}{{ error }}{% endfor %} +
+ {% else %} + {{ form_start.inter_arrival_time(class="form-control") }} + {% endif %} +
+
+
+ +
+ {{ form_start.do_teardown(class="form-control") }} {{ form_start.do_teardown.label(class="col-sm-2 col-form-label") }} +
+
+ +
+ {{ form_start.record_to_dlt(class="form-control") }} {{ form_start.record_to_dlt.label(class="col-sm-2 col-form-label") }} +
+
+ +
+ {{ form_start.dlt_domain_id.label(class="col-sm-2 col-form-label") }} +
+ {% if form_start.dlt_domain_id.errors %} + {{ form_start.dlt_domain_id(class="form-control is-invalid") }} +
+ {% for error in form_start.dlt_domain_id.errors %}{{ error }}{% endfor %} +
+ {% else %} + {{ form_start.dlt_domain_id(class="form-control") }} + {% endif %} +
+
+
+ +
+ {{ form_start.infinite_loop(class="form-control") }} {{ form_start.infinite_loop.label(class="col-sm-2 col-form-label") }} +
+
+ +
+ {{ form_start.running(class="form-control") }} {{ form_start.running.label(class="col-sm-2 col-form-label") }} +
+
+ +
+ {{ form_start.submit(class="btn btn-primary") }} +
+
+
+ +
+ {{ form_stop.hidden_tag() }} +
+
+ {{ form_stop.submit(class="btn btn-primary") }} +
+
+
+ +{% endblock %} diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html index 11a868fdf..eef42ae9a 100644 --- a/src/webui/service/templates/main/debug.html +++ b/src/webui/service/templates/main/debug.html @@ -17,26 +17,12 @@ {% extends 'base.html' %} {% block content %} -

Debug

+

Debug API

- - -

Load Generator:

- Start - Stop + {% endblock %} -- GitLab From 505db5ca87fa425d39fc6b888c65878174d3398e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:38:36 +0000 Subject: [PATCH 08/34] Slice component: - first complete implementation of slice grouper (under debug) --- src/slice/requirements.in | 3 + src/slice/service/SliceGrouper.py | 61 -------- src/slice/service/SliceServiceServicerImpl.py | 8 + src/slice/service/slice_grouper/Constants.py | 22 +++ .../service/slice_grouper/MetricsExporter.py | 126 +++++++++++++++ .../service/slice_grouper/SliceGrouper.py | 94 ++++++++++++ src/slice/service/slice_grouper/Tools.py | 145 ++++++++++++++++++ src/slice/service/slice_grouper/__init__.py | 14 ++ 8 files changed, 412 insertions(+), 61 deletions(-) delete mode 100644 src/slice/service/SliceGrouper.py create mode 100644 src/slice/service/slice_grouper/Constants.py create mode 100644 src/slice/service/slice_grouper/MetricsExporter.py create mode 100644 src/slice/service/slice_grouper/SliceGrouper.py create mode 100644 src/slice/service/slice_grouper/Tools.py create mode 100644 src/slice/service/slice_grouper/__init__.py diff --git a/src/slice/requirements.in b/src/slice/requirements.in index 42a96f5a5..854c71a59 100644 --- a/src/slice/requirements.in +++ b/src/slice/requirements.in @@ -14,4 +14,7 @@ #deepdiff==5.8.* numpy==1.23.* +pandas==1.5.* +questdb==1.0.1 +requests==2.27.* scikit-learn==1.1.* diff --git a/src/slice/service/SliceGrouper.py b/src/slice/service/SliceGrouper.py deleted file mode 100644 index e5363de1b..000000000 --- a/src/slice/service/SliceGrouper.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#import numpy as np -#import pandas as pd -from matplotlib import pyplot as plt -from sklearn.datasets import make_blobs -from sklearn.cluster import KMeans -from common.proto.context_pb2 import ContextId -from context.client.ContextClient import ContextClient - -class SliceGrouper: - def __init__(self) -> None: - pass - - def load_slices(self, context_uuid : str) -> None: - context_client = ContextClient() - - - context_client.ListSlices(ContextId) - -X, y = make_blobs(n_samples=300, n_features=2, cluster_std=[(10,.1),(100,.01)],centers= [(10,.9), (100,.99)]) - -plt.scatter(X[:,0], X[:,1]) -plt.show() - - -wcss = [] -for i in range(1, 11): - kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0) - kmeans.fit(X) - wcss.append(kmeans.inertia_) -plt.plot(range(1, 11), wcss) -plt.title('Elbow Method') -plt.xlabel('Number of clusters') -plt.ylabel('WCSS') -plt.show() - - -kmeans = KMeans(n_clusters=2, init='k-means++', max_iter=300, n_init=10, random_state=0) -pred_y = kmeans.fit_predict(X) -plt.scatter(X[:,0], X[:,1]) -plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red') -plt.ylabel('service-slo-availability') -plt.xlabel('service-slo-one-way-bandwidth') -ax = plt.subplot(1, 1, 1) - -ax.set_ylim(bottom=0., top=1.) -ax.set_xlim(left=0.) 
-plt.show() diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index 21d820089..fe2377f91 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -28,6 +28,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from interdomain.client.InterdomainClient import InterdomainClient from service.client.ServiceClient import ServiceClient +from .slice_grouper.SliceGrouper import SliceGrouper LOGGER = logging.getLogger(__name__) @@ -36,6 +37,7 @@ METRICS_POOL = MetricsPool('Slice', 'RPC') class SliceServiceServicerImpl(SliceServiceServicer): def __init__(self): LOGGER.debug('Creating Servicer...') + self._slice_grouper = SliceGrouper() LOGGER.debug('Servicer Created') def create_update(self, request : Slice) -> SliceId: @@ -82,6 +84,9 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client.SetSlice(slice_active) return slice_id + if self._slice_grouper.is_enabled: + grouped = self._slice_grouper.group(slice_with_uuids) + # Local domain slice service_id = ServiceId() # pylint: disable=no-member @@ -202,6 +207,9 @@ class SliceServiceServicerImpl(SliceServiceServicer): current_slice.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member context_client.SetSlice(current_slice) + if self._slice_grouper.is_enabled: + ungrouped = self._slice_grouper.ungroup(current_slice) + service_client = ServiceClient() for service_id in _slice.slice_service_ids: current_slice = Slice() diff --git a/src/slice/service/slice_grouper/Constants.py b/src/slice/service/slice_grouper/Constants.py new file mode 100644 index 000000000..2edd853a2 --- /dev/null +++ b/src/slice/service/slice_grouper/Constants.py @@ -0,0 +1,22 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: define by means of settings +SLICE_GROUPS = [ + ('bronze', 10.0, 10.0), # Bronze (10%, 10Gb/s) + ('silver', 30.0, 40.0), # Silver (30%, 40Gb/s) + ('gold', 70.0, 50.0), # Gold (70%, 50Gb/s) + ('platinum', 99.0, 100.0), # Platinum (99%, 100Gb/s) +] +SLICE_GROUP_NAMES = {slice_group[0] for slice_group in SLICE_GROUPS} diff --git a/src/slice/service/slice_grouper/MetricsExporter.py b/src/slice/service/slice_grouper/MetricsExporter.py new file mode 100644 index 000000000..d6738ac3e --- /dev/null +++ b/src/slice/service/slice_grouper/MetricsExporter.py @@ -0,0 +1,126 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime, logging, os, requests +from typing import Any, Literal, Union +from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module + +LOGGER = logging.getLogger(__name__) + +MAX_RETRIES = 10 +DELAY_RETRIES = 0.5 + +MSG_EXPORT_EXECUTED = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed' +MSG_EXPORT_FAILED = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...' +MSG_REST_BAD_STATUS = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}' +MSG_REST_EXECUTED = '[rest_request] Query({:s}) executed, result: {:s}' +MSG_REST_FAILED = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...' +MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}' + +METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME') +METRICSDB_ILP_PORT = int(os.environ.get('METRICSDB_ILP_PORT')) +METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT')) +METRICSDB_TABLE = 'slice_groups' + +COLORS = { + 'platinum': '#E5E4E2', + 'gold' : '#FFD700', + 'silver' : '#808080', + 'bronze' : '#CD7F32', +} +DEFAULT_COLOR = '#000000' # black + +SQL_MARK_DELETED = "UPDATE {:s} SET is_deleted='true' WHERE slice_uuid='{:s}';" + +class MetricsExporter(): + def create_table(self) -> None: + sql_query = ' '.join([ + 'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE)), + ','.join([ + 'timestamp TIMESTAMP', + 'slice_uuid SYMBOL', + 'slice_group SYMBOL', + 'slice_color SYMBOL', + 'is_deleted SYMBOL', + 'slice_availability DOUBLE', + 'slice_capacity_center DOUBLE', + 'slice_capacity DOUBLE', + ]), + ') TIMESTAMP(timestamp);' + ]) + try: + result = self.rest_request(sql_query) + if not result: raise Exception + LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE))) + except Exception as e: + LOGGER.warning('Table {:s} cannot be created. 
{:s}'.format(str(METRICSDB_TABLE), str(e))) + raise + + def export_point( + self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float, + is_center : bool = False + ) -> None: + dt_timestamp = datetime.datetime.utcnow() + slice_color = COLORS.get(slice_group, DEFAULT_COLOR) + symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color, is_deleted='false') + columns = dict(slice_availability=slice_availability) + columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity + + for retry in range(MAX_RETRIES): + try: + with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender: + sender.row(METRICSDB_TABLE, symbols=symbols, columns=columns, at=dt_timestamp) + sender.flush() + LOGGER.info(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns))) + return + except (Exception, IngressError): # pylint: disable=broad-except + LOGGER.exception(MSG_EXPORT_FAILED.format( + str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES)) + + raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES)) + + def delete_point(self, slice_uuid : str) -> None: + sql_query = SQL_MARK_DELETED.format(str(METRICSDB_TABLE), slice_uuid) + try: + result = self.rest_request(sql_query) + if not result: raise Exception + LOGGER.info('Point {:s} deleted'.format(str(slice_uuid))) + except Exception as e: + LOGGER.warning('Point {:s} cannot be deleted. 
{:s}'.format(str(slice_uuid), str(e))) + raise + + def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]: + url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT) + params = {'query': rest_query, 'fmt': 'json'} + + for retry in range(MAX_RETRIES): + try: + response = requests.get(url, params=params) + status_code = response.status_code + if status_code not in {200}: + str_content = response.content.decode('UTF-8') + raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content)) + + json_response = response.json() + if 'ddl' in json_response: + LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl']))) + return True + elif 'dataset' in json_response: + LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset']))) + return json_response['dataset'] + + except Exception: # pylint: disable=broad-except + LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES)) + + raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES)) diff --git a/src/slice/service/slice_grouper/SliceGrouper.py b/src/slice/service/slice_grouper/SliceGrouper.py new file mode 100644 index 000000000..735d02899 --- /dev/null +++ b/src/slice/service/slice_grouper/SliceGrouper.py @@ -0,0 +1,94 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, pandas, threading +from typing import Dict, Optional, Tuple +from sklearn.cluster import KMeans +from common.proto.context_pb2 import Slice +from common.tools.grpc.Tools import grpc_message_to_json_string +from .Constants import SLICE_GROUPS +from .MetricsExporter import MetricsExporter +from .Tools import ( + add_slice_to_group, create_slice_groups, get_slice_grouping_parameters, is_slice_grouping_enabled, + remove_slice_from_group) + +LOGGER = logging.getLogger(__name__) + +class SliceGrouper: + def __init__(self) -> None: + self._lock = threading.Lock() + self._is_enabled = is_slice_grouping_enabled() + if not self._is_enabled: return + + metrics_exporter = MetricsExporter() + metrics_exporter.create_table() + + self._slice_groups = create_slice_groups(SLICE_GROUPS) + + # Initialize and fit K-Means with the pre-defined clusters we want, i.e., one per slice group + df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps']) + k_means = KMeans(n_clusters=df_groups.shape[0]) + k_means.fit(df_groups[['availability', 'capacity_gbps']]) + df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']]) + self._k_means = k_means + self._df_groups = df_groups + + self._group_mapping : Dict[str, Dict] = { + group['name']:{k:v for k,v in group.items() if k != 'name'} + for group in list(df_groups.to_dict('records')) + } + + label_to_group = {} + for group_name,group_attrs in self._group_mapping.items(): + label = group_attrs['label'] + availability = group_attrs['availability'] + capacity_gbps = group_attrs['capacity_gbps'] + metrics_exporter.export_point( + group_name, group_name, availability, capacity_gbps, is_center=True) + label_to_group[label] = group_name + self._label_to_group = label_to_group + + def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]: + with self._lock: + grouping_parameters = get_slice_grouping_parameters(slice_obj) + LOGGER.debug('[_select_group] 
grouping_parameters={:s}'.format(str(grouping_parameters))) + if grouping_parameters is None: return None + + sample = pandas.DataFrame([grouping_parameters], columns=['availability', 'capacity_gbps']) + sample['label'] = self._k_means.predict(sample) + sample = sample.to_dict('records')[0] # pylint: disable=unsubscriptable-object + LOGGER.debug('[_select_group] sample={:s}'.format(str(sample))) + label = sample['label'] + availability = sample['availability'] + capacity_gbps = sample['capacity_gbps'] + group_name = self._label_to_group[label] + LOGGER.debug('[_select_group] group_name={:s}'.format(str(group_name))) + return group_name, availability, capacity_gbps + + @property + def is_enabled(self): return self._is_enabled + + def group(self, slice_obj : Slice) -> bool: + LOGGER.debug('[group] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj))) + selected_group = self._select_group(slice_obj) + LOGGER.debug('[group] selected_group={:s}'.format(str(selected_group))) + if selected_group is None: return False + return add_slice_to_group(slice_obj, selected_group) + + def ungroup(self, slice_obj : Slice) -> bool: + LOGGER.debug('[ungroup] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj))) + selected_group = self._select_group(slice_obj) + LOGGER.debug('[ungroup] selected_group={:s}'.format(str(selected_group))) + if selected_group is None: return False + return remove_slice_from_group(slice_obj, selected_group) diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py new file mode 100644 index 000000000..1ff82bece --- /dev/null +++ b/src/slice/service/slice_grouper/Tools.py @@ -0,0 +1,145 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Optional, Set, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME +from common.Settings import get_setting +from common.method_wrappers.ServiceExceptions import NotFoundException +from common.proto.context_pb2 import IsolationLevelEnum, Slice, SliceId, SliceStatusEnum +from common.tools.context_queries.Context import create_context +from common.tools.context_queries.Slice import get_slice +from context.client.ContextClient import ContextClient +from slice.service.slice_grouper.MetricsExporter import MetricsExporter + +SETTING_NAME_SLICE_GROUPING = 'SLICE_GROUPING' +TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'} + +NO_ISOLATION = IsolationLevelEnum.NO_ISOLATION + +def is_slice_grouping_enabled() -> bool: + is_enabled = get_setting(SETTING_NAME_SLICE_GROUPING, default=None) + if is_enabled is None: return False + str_is_enabled = str(is_enabled).upper() + return str_is_enabled in TRUE_VALUES + +def create_slice_group( + context_uuid : str, slice_name : str, capacity_gbps : float, availability : float +) -> Slice: + slice_group_obj = Slice() + slice_group_obj.slice_id.context_id.context_uuid.uuid = context_uuid # pylint: disable=no-member + slice_group_obj.slice_id.slice_uuid.uuid = slice_name # pylint: disable=no-member + slice_group_obj.name = slice_name + slice_group_obj.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member + #del slice_group_obj.slice_endpoint_ids[:] # no endpoints initially + #del slice_group_obj.slice_service_ids[:] # no sub-services + #del 
slice_group_obj.slice_subslice_ids[:] # no sub-slices + #del slice_group_obj.slice_config.config_rules[:] # no config rules + slice_group_obj.slice_owner.owner_uuid.uuid = 'TeraFlowSDN' # pylint: disable=no-member + slice_group_obj.slice_owner.owner_string = 'TeraFlowSDN' # pylint: disable=no-member + + constraint_sla_capacity = slice_group_obj.slice_constraints.add() # pylint: disable=no-member + constraint_sla_capacity.sla_capacity.capacity_gbps = capacity_gbps + + constraint_sla_availability = slice_group_obj.slice_constraints.add() # pylint: disable=no-member + constraint_sla_availability.sla_availability.num_disjoint_paths = 1 + constraint_sla_availability.sla_availability.all_active = True + constraint_sla_availability.sla_availability.availability = availability + + constraint_sla_isolation = slice_group_obj.slice_constraints.add() # pylint: disable=no-member + constraint_sla_isolation.sla_isolation.isolation_level.append(NO_ISOLATION) + + return slice_group_obj + +def create_slice_groups( + slice_groups : List[Tuple[str, float, float]], context_uuid : str = DEFAULT_CONTEXT_NAME +) -> Dict[str, SliceId]: + context_client = ContextClient() + create_context(context_client, context_uuid) + + slice_group_ids : Dict[str, SliceId] = dict() + for slice_group in slice_groups: + slice_group_name = slice_group[0] + slice_group_obj = get_slice(context_client, slice_group_name, DEFAULT_CONTEXT_NAME) + if slice_group_obj is None: + slice_group_obj = create_slice_group( + DEFAULT_CONTEXT_NAME, slice_group_name, slice_group[2], slice_group[1]) + slice_group_id = context_client.SetSlice(slice_group_obj) + slice_group_ids[slice_group_name] = slice_group_id + else: + slice_group_ids[slice_group_name] = slice_group_obj.slice_id + + return slice_group_ids + +def get_slice_grouping_parameters(slice_obj : Slice) -> Optional[Tuple[float, float]]: + isolation_levels : Set[int] = set() + availability : Optional[float] = None + capacity_gbps : Optional[float] = None + + for 
constraint in slice_obj.slice_constraints: + kind = constraint.WhichOneof('constraint') + if kind == 'sla_isolation': + isolation_levels.update(constraint.sla_isolation.isolation_level) + elif kind == 'sla_capacity': + capacity_gbps = constraint.sla_capacity.capacity_gbps + elif kind == 'sla_availability': + availability = constraint.sla_availability.availability + else: + continue + + no_isolation_level = len(isolation_levels) == 0 + single_isolation_level = len(isolation_levels) == 1 + has_no_isolation_level = NO_ISOLATION in isolation_levels + can_be_grouped = no_isolation_level or (single_isolation_level and has_no_isolation_level) + if not can_be_grouped: return None + if availability is None: return None + if capacity_gbps is None: return None + return availability, capacity_gbps + +def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool: + group_name, availability, capacity_gbps = selected_group + slice_uuid = slice_obj.slice_id.slice_uuid.uuid + + context_client = ContextClient() + slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True) + if slice_group_obj is None: + raise NotFoundException('Slice', group_name, extra_details='while adding to group') + + for subslice_id in slice_group_obj.slice_subslice_ids: + if subslice_id == slice_obj.slice_id: break # already added + else: + slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id) + # TODO: add other logic, such as re-configure parent slice + + metrics_exporter = MetricsExporter() + metrics_exporter.export_point( + slice_uuid, group_name, availability, capacity_gbps, is_center=False) + + return True + +def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float, float]) -> bool: + group_name, _, _ = selected_group + slice_uuid = slice_obj.slice_id.slice_uuid.uuid + + context_client = ContextClient() + slice_group_obj = get_slice(context_client, group_name, DEFAULT_CONTEXT_NAME, rw_copy=True) + if 
slice_group_obj is None: + raise NotFoundException('Slice', group_name, extra_details='while removing from group') + + if slice_obj.slice_id in slice_group_obj.slice_subslice_ids: + slice_group_obj.slice_subslice_ids.remove(slice_obj.slice_id) + # TODO: other logic, such as deconfigure parent slice + + metrics_exporter = MetricsExporter() + metrics_exporter.delete_point(slice_uuid) + return True diff --git a/src/slice/service/slice_grouper/__init__.py b/src/slice/service/slice_grouper/__init__.py new file mode 100644 index 000000000..1549d9811 --- /dev/null +++ b/src/slice/service/slice_grouper/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ -- GitLab From 7a3d877901dbbaa2321038418ab8f9d4856d35a5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 10:39:50 +0000 Subject: [PATCH 09/34] Slice Manifest: - updated to include flag to activate slice grouping - updated to import qdb secrets --- manifests/sliceservice.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index 447f6a1c7..e5757874b 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -37,6 +37,11 @@ spec: env: - name: LOG_LEVEL value: "INFO" + - name: SLICE_GROUPING + value: "ENABLE" + envFrom: + - secretRef: + name: qdb-data readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:4040"] -- GitLab From 35e7c4c9dd7eea80109cbc1bfbf15be9daa37ffd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 12:53:39 +0000 Subject: [PATCH 10/34] WebUI component: - Added Grafana dashboard for slice grouping --- src/webui/grafana_db_slc_grps_psql.json | 176 ++++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 src/webui/grafana_db_slc_grps_psql.json diff --git a/src/webui/grafana_db_slc_grps_psql.json b/src/webui/grafana_db_slc_grps_psql.json new file mode 100644 index 000000000..0b38a7c7b --- /dev/null +++ b/src/webui/grafana_db_slc_grps_psql.json @@ -0,0 +1,176 @@ +{"overwrite": true, "folderId": 0, "dashboard": + { + "id": null, + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "postgres", + "uid": "questdb-slc-grp" + }, + "gridPos": { + "h": 21, + "w": 11, + "x": 
0, + "y": 0 + }, + "id": 2, + "options": { + "ReferenceLines": [], + "border": { + "color": "yellow", + "size": 0 + }, + "fieldSets": [ + { + "col": 6, + "color": "#C4162A", + "colorCol": 3, + "dotSize": 2, + "hidden": false, + "lineSize": 1, + "lineType": "none", + "polynomialOrder": 3, + "sizeCol": -7 + }, + { + "col": 5, + "color": "#edcd7d", + "colorCol": 3, + "dotSize": 2, + "hidden": false, + "lineSize": 1, + "lineType": "none", + "polynomialOrder": 3, + "sizeCol": -2 + } + ], + "grid": { + "color": "gray" + }, + "label": { + "col": -1, + "color": "#CCC", + "textSize": 2 + }, + "legend": { + "show": false, + "size": 0 + }, + "xAxis": { + "col": 4, + "inverted": false + }, + "xAxisExtents": { + "min": 0, + "max": 100 + }, + "xAxisTitle": { + "text": "Availability %", + "color": "white", + "textSize": 2, + "rotated": false, + "logScale": false, + "fontSize": 4, + "fontColor": "white" + }, + "xMargins": { + "lower": 30, + "upper": 10 + }, + "yAxisExtents": { + "min": 0, + "max": 100 + }, + "yAxisTitle": { + "text": "Capacity Gb/s", + "color": "#ccccdc", + "textSize": 2, + "rotated": true, + "logScale": false, + "fontSize": 4, + "fontColor": "white" + }, + "yMargins": { + "lower": 20, + "upper": 20 + } + }, + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "yTIXTo14z" + }, + "format": "table", + "group": [], + "hide": false, + "metricColumn": "none", + "rawQuery": true, + "rawSql": "SELECT timestamp as \"time\", slice_uuid, slice_group, slice_color, slice_availability, slice_capacity, slice_capacity_center\nFROM slice_groups\nWHERE $__timeFilter(timestamp);", + "refId": "A", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "column" + } + ] + ], + "table": "slice_groups", + "timeColumn": "timestamp", + "where": [] + } + ], + "title": "Slice Groups", + "transformations": [], + "type": "michaeldmoore-scatter-panel" + } + ], + "refresh": "5s", + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + 
"time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Slice Grouping", + "uid": "tfs-slice-grps", + "version": 2, + "weekStart": "" + } +} -- GitLab From 947de0bbf10a04181de25464b8354ecb4c63e774 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 12:54:42 +0000 Subject: [PATCH 11/34] Deploy Scripts: - added support for deploying Slice Grouping monitoring table and Grafana dashboard --- deploy/all.sh | 8 ++++++-- deploy/qdb.sh | 9 +++++++-- deploy/tfs.sh | 43 +++++++++++++++++++++++++++++++++++++++++++ my_deploy.sh | 7 +++++-- 4 files changed, 61 insertions(+), 6 deletions(-) diff --git a/deploy/all.sh b/deploy/all.sh index 09239afed..6f5592cb4 100755 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -110,10 +110,14 @@ export QDB_PASSWORD=${QDB_PASSWORD:-"quest"} # If not already set, set the table name to be used by Monitoring for KPIs. export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} +# If not already set, set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"} + # If not already set, disable flag for dropping tables if they exist. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION! -# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variable -# QDB_TABLE_MONITORING_KPIS will be dropped while checking/deploying QuestDB. +# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables +# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while +# checking/deploying QuestDB. export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""} # If not already set, disable flag for re-deploying QuestDB from scratch. 
diff --git a/deploy/qdb.sh b/deploy/qdb.sh index a65408804..df11c25e8 100755 --- a/deploy/qdb.sh +++ b/deploy/qdb.sh @@ -30,10 +30,14 @@ export QDB_PASSWORD=${QDB_PASSWORD:-"quest"} # If not already set, set the table name to be used by Monitoring for KPIs. export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} +# If not already set, set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"} + # If not already set, disable flag for dropping tables if they exist. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION! -# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variable -# QDB_TABLE_MONITORING_KPIS will be dropped while checking/deploying QuestDB. +# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variables +# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped +# while checking/deploying QuestDB. export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""} # If not already set, disable flag for re-deploying QuestDB from scratch. @@ -150,6 +154,7 @@ function qdb_drop_tables() { echo "Drop tables, if exist" curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;" + curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;" echo } diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 2bacc8cac..68c086c4f 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -81,6 +81,9 @@ export QDB_PASSWORD=${QDB_PASSWORD:-"quest"} # If not already set, set the table name to be used by Monitoring for KPIs. export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"} +# If not already set, set the table name to be used by Slice for plotting groups. 
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"} + ######################################################################################################################## # Automated steps start here @@ -131,6 +134,7 @@ kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type=' --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \ --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \ --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \ + --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \ --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \ --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD} printf "\n" @@ -326,6 +330,11 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring" GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}" echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT + echo ">> Installing Scatter Plot plugin..." + curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \ + ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install + echo + # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/ QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}" echo ">> Creating datasources..." 
@@ -354,17 +363,51 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring" }' ${GRAFANA_URL_UPDATED}/api/datasources echo + curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{ + "access" : "proxy", + "type" : "postgres", + "name" : "questdb-slc-grp", + "url" : "'${QDB_HOST_PORT}'", + "database" : "'${QDB_TABLE_SLICE_GROUPS}'", + "user" : "'${QDB_USERNAME}'", + "basicAuth": false, + "isDefault": false, + "jsonData" : { + "sslmode" : "disable", + "postgresVersion" : 1100, + "maxOpenConns" : 0, + "maxIdleConns" : 2, + "connMaxLifetime" : 14400, + "tlsAuth" : false, + "tlsAuthWithCACert" : false, + "timescaledb" : false, + "tlsConfigurationMethod": "file-path", + "tlsSkipVerify" : true + }, + "secureJsonData": {"password": "'${QDB_PASSWORD}'"} + }' ${GRAFANA_URL_UPDATED}/api/datasources + printf "\n\n" + echo ">> Creating dashboards..." # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/ curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \ ${GRAFANA_URL_UPDATED}/api/dashboards/db echo + curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \ + ${GRAFANA_URL_UPDATED}/api/dashboards/db + printf "\n\n" + echo ">> Starting dashboards..." 
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit" DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} echo + DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps" + DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id') + curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID} + echo + printf "\n\n" fi diff --git a/my_deploy.sh b/my_deploy.sh index 1efea75bb..6a360812b 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -57,7 +57,7 @@ export CRDB_DATABASE="tfs" export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="" +export CRDB_DROP_DATABASE_IF_EXISTS="YES" # Disable flag for re-deploying CockroachDB from scratch. export CRDB_REDEPLOY="" @@ -86,8 +86,11 @@ export QDB_PASSWORD="quest" # Set the table name to be used by Monitoring for KPIs. export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + # Disable flag for dropping tables if they exist. -export QDB_DROP_TABLES_IF_EXIST="" +export QDB_DROP_TABLES_IF_EXIST="YES" # Disable flag for re-deploying QuestDB from scratch. 
export QDB_REDEPLOY="" -- GitLab From ada162bf56ae1ead03448119297e44b144b0c393 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 12:55:09 +0000 Subject: [PATCH 12/34] Slice component: - updated monitoring table name --- src/slice/service/slice_grouper/MetricsExporter.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/slice/service/slice_grouper/MetricsExporter.py b/src/slice/service/slice_grouper/MetricsExporter.py index d6738ac3e..ce3d88b80 100644 --- a/src/slice/service/slice_grouper/MetricsExporter.py +++ b/src/slice/service/slice_grouper/MetricsExporter.py @@ -31,7 +31,7 @@ MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}' METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME') METRICSDB_ILP_PORT = int(os.environ.get('METRICSDB_ILP_PORT')) METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT')) -METRICSDB_TABLE = 'slice_groups' +METRICSDB_TABLE_SLICE_GROUPS = os.environ.get('METRICSDB_TABLE_SLICE_GROUPS') COLORS = { 'platinum': '#E5E4E2', @@ -46,7 +46,7 @@ SQL_MARK_DELETED = "UPDATE {:s} SET is_deleted='true' WHERE slice_uuid='{:s}';" class MetricsExporter(): def create_table(self) -> None: sql_query = ' '.join([ - 'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE)), + 'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)), ','.join([ 'timestamp TIMESTAMP', 'slice_uuid SYMBOL', @@ -62,9 +62,9 @@ class MetricsExporter(): try: result = self.rest_request(sql_query) if not result: raise Exception - LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE))) + LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS))) except Exception as e: - LOGGER.warning('Table {:s} cannot be created. {:s}'.format(str(METRICSDB_TABLE), str(e))) + LOGGER.warning('Table {:s} cannot be created. 
{:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e))) raise def export_point( @@ -80,7 +80,7 @@ class MetricsExporter(): for retry in range(MAX_RETRIES): try: with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender: - sender.row(METRICSDB_TABLE, symbols=symbols, columns=columns, at=dt_timestamp) + sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp) sender.flush() LOGGER.info(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns))) return @@ -91,7 +91,7 @@ class MetricsExporter(): raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES)) def delete_point(self, slice_uuid : str) -> None: - sql_query = SQL_MARK_DELETED.format(str(METRICSDB_TABLE), slice_uuid) + sql_query = SQL_MARK_DELETED.format(str(METRICSDB_TABLE_SLICE_GROUPS), slice_uuid) try: result = self.rest_request(sql_query) if not result: raise Exception -- GitLab From 0f3b8b5171d254487ab5b0483ded23ecb93d31b0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 13:25:37 +0000 Subject: [PATCH 13/34] Deploy Scripts: - minor cosmetic changes --- deploy/qdb.sh | 1 + deploy/tfs.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/deploy/qdb.sh b/deploy/qdb.sh index df11c25e8..d94c000bf 100755 --- a/deploy/qdb.sh +++ b/deploy/qdb.sh @@ -154,6 +154,7 @@ function qdb_drop_tables() { echo "Drop tables, if exist" curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;" + echo curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;" echo } diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 68c086c4f..16cf5c13b 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -325,6 +325,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring" "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'" }' ${GRAFANA_URL_DEFAULT}/api/user/password echo + echo # Updated Grafana API URL 
GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}" -- GitLab From 5aff458e37aa3a37befcd30d406bdc0b027933f3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 16:42:11 +0000 Subject: [PATCH 14/34] WebUI component: - updated grafana slice-grouping dashboard queries - updated load-gen request form --- src/webui/grafana_db_slc_grps_psql.json | 8 ++-- src/webui/service/load_gen/forms.py | 12 +++--- src/webui/service/load_gen/routes.py | 39 +++++++++++-------- .../service/templates/load_gen/home.html | 36 ++++++++++------- 4 files changed, 54 insertions(+), 41 deletions(-) diff --git a/src/webui/grafana_db_slc_grps_psql.json b/src/webui/grafana_db_slc_grps_psql.json index 0b38a7c7b..6aa7a478b 100644 --- a/src/webui/grafana_db_slc_grps_psql.json +++ b/src/webui/grafana_db_slc_grps_psql.json @@ -126,14 +126,14 @@ { "datasource": { "type": "postgres", - "uid": "yTIXTo14z" + "uid": "questdb-slc-grp" }, "format": "table", "group": [], "hide": false, "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT timestamp as \"time\", slice_uuid, slice_group, slice_color, slice_availability, slice_capacity, slice_capacity_center\nFROM slice_groups\nWHERE $__timeFilter(timestamp);", + "rawSql": "SELECT timestamp as \"time\", slice_uuid, slice_group, slice_color, slice_availability, slice_capacity, slice_capacity_center, is_deleted\nFROM tfs_slice_groups\nWHERE $__timeFilter(timestamp) AND is_deleted <> 'true';", "refId": "A", "select": [ [ @@ -145,7 +145,7 @@ } ] ], - "table": "slice_groups", + "table": "tfs_slice_groups", "timeColumn": "timestamp", "where": [] } @@ -163,7 +163,7 @@ "list": [] }, "time": { - "from": "now-5m", + "from": "now-30m", "to": "now" }, "timepicker": {}, diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py index 3144fbe5d..911d28e44 100644 --- a/src/webui/service/load_gen/forms.py +++ b/src/webui/service/load_gen/forms.py @@ -18,7 +18,7 @@ from wtforms.validators import 
DataRequired, NumberRange class LoadGenStartForm(FlaskForm): num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)]) - num_generated = IntegerField('Num Generated', render_kw={'readonly': True}) + num_generated = IntegerField('Num Generated', default=0, render_kw={'readonly': True}) request_type_service_l2nm = BooleanField('Service L2NM', default=False) request_type_service_l3nm = BooleanField('Service L3NM', default=False) @@ -27,17 +27,17 @@ class LoadGenStartForm(FlaskForm): request_type_slice_l2nm = BooleanField('Slice L2NM', default=True) request_type_slice_l3nm = BooleanField('Slice L3NM', default=False) - offered_load = FloatField('Offered Load [Erlang]', default=50, validators=[NumberRange(min=1.e-12)]) - holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=1.e-12)]) - inter_arrival_time = FloatField('Inter Arrival Time[seconds]', default=0, validators=[NumberRange(min=1.e-12)]) + offered_load = FloatField('Offered Load [Erlang]', default=50, validators=[NumberRange(min=0.0)]) + holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=0.0)]) + inter_arrival_time = FloatField('Inter Arrival Time[seconds]', default=0, validators=[NumberRange(min=0.0)]) do_teardown = BooleanField('Do Teardown', default=True) record_to_dlt = BooleanField('Record to DLT', default=False) dlt_domain_id = StringField('DLT Domain Id', default='') - infinite_loop = BooleanField('Infinite Loop', render_kw={'readonly': True}) - running = BooleanField('Running', render_kw={'readonly': True}) + infinite_loop = BooleanField('Infinite Loop', default=False, render_kw={'readonly': True}) + running = BooleanField('Running', default=False, render_kw={'readonly': True}) submit = SubmitField('Start') diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py index 893900908..ab1733013 100644 --- a/src/webui/service/load_gen/routes.py +++ 
b/src/webui/service/load_gen/routes.py @@ -20,6 +20,14 @@ from .forms import LoadGenStartForm, LoadGenStopForm load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen') +def make_read_only(field, readonly : bool) -> None: + if not hasattr(field, 'render_kw'): + field.render_kw = dict(readonly=readonly) + elif field.render_kw is None: + field.render_kw = dict(readonly=readonly) + else: + field.render_kw['readonly'] = readonly + @load_gen.route('home', methods=['GET', 'POST']) def home(): load_gen_client = LoadGeneratorClient() @@ -61,7 +69,7 @@ def home(): load_gen_client.connect() load_gen_client.Stop(Empty()) load_gen_client.close() - flash('Load Generator Stoped.', 'success') + flash('Load Generator Stopped.', 'success') except Exception as e: # pylint: disable=broad-except flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger') @@ -88,19 +96,18 @@ def home(): form_start.request_type_slice_l2nm .default = RequestTypeEnum.REQUESTTYPE_SLICE_L2NM in request_types form_start.request_type_slice_l3nm .default = RequestTypeEnum.REQUESTTYPE_SLICE_L3NM in request_types - form_start.num_requests .render_kw['readonly'] = status.running - form_start.offered_load .render_kw['readonly'] = status.running - form_start.holding_time .render_kw['readonly'] = status.running - form_start.inter_arrival_time.render_kw['readonly'] = status.running - form_start.do_teardown .render_kw['readonly'] = status.running - form_start.record_to_dlt .render_kw['readonly'] = status.running - form_start.dlt_domain_id .render_kw['readonly'] = status.running - - form_start.request_type_service_l2nm.render_kw['readonly'] = status.running - form_start.request_type_service_l3nm.render_kw['readonly'] = status.running - form_start.request_type_service_mw .render_kw['readonly'] = status.running - form_start.request_type_service_tapi.render_kw['readonly'] = status.running - form_start.request_type_slice_l2nm .render_kw['readonly'] = status.running - form_start.request_type_slice_l3nm 
.render_kw['readonly'] = status.running + make_read_only(form_start.num_requests , status.running) + make_read_only(form_start.offered_load , status.running) + make_read_only(form_start.holding_time , status.running) + make_read_only(form_start.inter_arrival_time , status.running) + make_read_only(form_start.do_teardown , status.running) + make_read_only(form_start.record_to_dlt , status.running) + make_read_only(form_start.dlt_domain_id , status.running) + make_read_only(form_start.request_type_service_l2nm, status.running) + make_read_only(form_start.request_type_service_l3nm, status.running) + make_read_only(form_start.request_type_service_mw , status.running) + make_read_only(form_start.request_type_service_tapi, status.running) + make_read_only(form_start.request_type_slice_l2nm , status.running) + make_read_only(form_start.request_type_slice_l3nm , status.running) - return render_template('load_gen/home.html', form_start=form_start, form_stop=form_stop) + return render_template('load_gen/home.html', form_start=form_start, form_stop=form_stop, is_running=status.running) diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html index 5aa751392..684e60ed5 100644 --- a/src/webui/service/templates/load_gen/home.html +++ b/src/webui/service/templates/load_gen/home.html @@ -21,7 +21,9 @@
+ {% if not is_running %} {{ form_start.hidden_tag() }} + {% endif %}
{{ form_start.num_requests.label(class="col-sm-2 col-form-label") }} @@ -56,12 +58,14 @@
Service Types:
+ {{ form_start.request_type_slice_l2nm }} {{ form_start.request_type_slice_l2nm .label(class="col-sm-3 col-form-label") }} + {{ form_start.request_type_slice_l3nm }} {{ form_start.request_type_slice_l3nm .label(class="col-sm-3 col-form-label") }} +
{{ form_start.request_type_service_l2nm }} {{ form_start.request_type_service_l2nm.label(class="col-sm-3 col-form-label") }} {{ form_start.request_type_service_l3nm }} {{ form_start.request_type_service_l3nm.label(class="col-sm-3 col-form-label") }} +
{{ form_start.request_type_service_mw }} {{ form_start.request_type_service_mw .label(class="col-sm-3 col-form-label") }} {{ form_start.request_type_service_tapi }} {{ form_start.request_type_service_tapi.label(class="col-sm-3 col-form-label") }} - {{ form_start.request_type_slice_l2nm }} {{ form_start.request_type_slice_l2nm .label(class="col-sm-3 col-form-label") }} - {{ form_start.request_type_slice_l3nm }} {{ form_start.request_type_slice_l3nm .label(class="col-sm-3 col-form-label") }}

@@ -112,18 +116,17 @@
- {{ form_start.do_teardown(class="form-control") }} {{ form_start.do_teardown.label(class="col-sm-2 col-form-label") }} -
-
- -
- {{ form_start.record_to_dlt(class="form-control") }} {{ form_start.record_to_dlt.label(class="col-sm-2 col-form-label") }} +
+ {{ form_start.do_teardown }} {{ form_start.do_teardown.label(class="col-sm-3 col-form-label") }}
+

- {{ form_start.dlt_domain_id.label(class="col-sm-2 col-form-label") }} +
DLT Settings:
+ {{ form_start.record_to_dlt }} {{ form_start.record_to_dlt.label(class="col-sm-3 col-form-label") }}
+ {{ form_start.dlt_domain_id.label(class="col-sm-2 col-form-label") }} {% if form_start.dlt_domain_id.errors %} {{ form_start.dlt_domain_id(class="form-control is-invalid") }}
@@ -137,21 +140,23 @@
- {{ form_start.infinite_loop(class="form-control") }} {{ form_start.infinite_loop.label(class="col-sm-2 col-form-label") }} -
-
- -
- {{ form_start.running(class="form-control") }} {{ form_start.running.label(class="col-sm-2 col-form-label") }} +
Status:
+
+ {{ form_start.infinite_loop }} {{ form_start.infinite_loop.label(class="col-sm-3 col-form-label") }} + {{ form_start.running }} {{ form_start.running.label(class="col-sm-3 col-form-label") }} +

+ {% if not is_running %}
{{ form_start.submit(class="btn btn-primary") }}
+ {% endif %}
+ {% if is_running %}
{{ form_stop.hidden_tag() }}
@@ -160,5 +165,6 @@
+ {% endif %} {% endblock %} -- GitLab From cb1a1c394cce50b541b25c437a68ca7ae7bbefc4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 17 Feb 2023 17:08:59 +0000 Subject: [PATCH 15/34] Slice component: - added command to update slice groups when grouping --- src/slice/service/slice_grouper/Tools.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py index 1ff82bece..12337cf8e 100644 --- a/src/slice/service/slice_grouper/Tools.py +++ b/src/slice/service/slice_grouper/Tools.py @@ -121,6 +121,8 @@ def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, flo slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id) # TODO: add other logic, such as re-configure parent slice + context_client.SetSlice(slice_group_obj) + metrics_exporter = MetricsExporter() metrics_exporter.export_point( slice_uuid, group_name, availability, capacity_gbps, is_center=False) @@ -140,6 +142,12 @@ def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float slice_group_obj.slice_subslice_ids.remove(slice_obj.slice_id) # TODO: other logic, such as deconfigure parent slice + tmp_slice_group_obj = Slice() + tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id) # pylint: disable=no-member + slice_subslice_id = tmp_slice_group_obj.slice_subslice_ids.add() # pylint: disable=no-member + slice_subslice_id.CopyFrom(slice_obj.slice_id) + context_client.UnsetSlice(tmp_slice_group_obj) + metrics_exporter = MetricsExporter() metrics_exporter.delete_point(slice_uuid) return True -- GitLab From e9ed3dc7481e4712f15f4994ba88dd354e36d942 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 14:53:09 +0000 Subject: [PATCH 16/34] WebUI component: - Corrected logic, form and page to manage/monitor load-gen component --- src/webui/service/load_gen/forms.py | 13 +- src/webui/service/load_gen/routes.py | 149 ++++++++++-------- 
.../service/templates/load_gen/home.html | 101 +++++------- 3 files changed, 129 insertions(+), 134 deletions(-) diff --git a/src/webui/service/load_gen/forms.py b/src/webui/service/load_gen/forms.py index 911d28e44..4e0020b04 100644 --- a/src/webui/service/load_gen/forms.py +++ b/src/webui/service/load_gen/forms.py @@ -16,7 +16,7 @@ from flask_wtf import FlaskForm from wtforms import BooleanField, FloatField, IntegerField, StringField, SubmitField from wtforms.validators import DataRequired, NumberRange -class LoadGenStartForm(FlaskForm): +class LoadGenForm(FlaskForm): num_requests = IntegerField('Num Requests', default=100, validators=[DataRequired(), NumberRange(min=0)]) num_generated = IntegerField('Num Generated', default=0, render_kw={'readonly': True}) @@ -29,17 +29,14 @@ class LoadGenStartForm(FlaskForm): offered_load = FloatField('Offered Load [Erlang]', default=50, validators=[NumberRange(min=0.0)]) holding_time = FloatField('Holding Time [seconds]', default=10, validators=[NumberRange(min=0.0)]) - inter_arrival_time = FloatField('Inter Arrival Time[seconds]', default=0, validators=[NumberRange(min=0.0)]) + inter_arrival_time = FloatField('Inter Arrival Time [seconds]', default=0, validators=[NumberRange(min=0.0)]) do_teardown = BooleanField('Do Teardown', default=True) record_to_dlt = BooleanField('Record to DLT', default=False) dlt_domain_id = StringField('DLT Domain Id', default='') - infinite_loop = BooleanField('Infinite Loop', default=False, render_kw={'readonly': True}) - running = BooleanField('Running', default=False, render_kw={'readonly': True}) + infinite_loop = BooleanField('Infinite Loop', default=False, render_kw={'disabled': True}) + running = BooleanField('Running', default=False, render_kw={'disabled': True}) - submit = SubmitField('Start') - -class LoadGenStopForm(FlaskForm): - submit = SubmitField('Stop') + submit = SubmitField('Start/Stop') diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py index 
ab1733013..5f47f06b0 100644 --- a/src/webui/service/load_gen/routes.py +++ b/src/webui/service/load_gen/routes.py @@ -12,102 +12,115 @@ # See the License for the specific language governing permissions and # limitations under the License. -from flask import render_template, Blueprint, flash +from typing import Any, Optional +from flask import redirect, render_template, Blueprint, flash, url_for from common.proto.context_pb2 import Empty from common.proto.load_generator_pb2 import Parameters, RequestTypeEnum from load_generator.client.LoadGeneratorClient import LoadGeneratorClient -from .forms import LoadGenStartForm, LoadGenStopForm +from .forms import LoadGenForm load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen') -def make_read_only(field, readonly : bool) -> None: +def set_properties(field, data : Any, readonly : Optional[bool] = None, disabled : Optional[bool] = None) -> None: if not hasattr(field, 'render_kw'): - field.render_kw = dict(readonly=readonly) + field.render_kw = dict() elif field.render_kw is None: - field.render_kw = dict(readonly=readonly) - else: + field.render_kw = dict() + + if readonly is not None: field.render_kw['readonly'] = readonly + if disabled is not None: + field.render_kw['disabled'] = disabled + + if (readonly is not None and readonly) or (disabled is not None and disabled): + field.data = data -@load_gen.route('home', methods=['GET', 'POST']) +@load_gen.route('home', methods=['GET']) def home(): load_gen_client = LoadGeneratorClient() - form_start = LoadGenStartForm() - form_stop = LoadGenStopForm() + load_gen_client.connect() + status = load_gen_client.GetStatus(Empty()) + load_gen_client.close() + + request_types = status.parameters.request_types + _request_type_service_l2nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM in request_types + _request_type_service_l3nm = RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM in request_types + _request_type_service_mw = RequestTypeEnum.REQUESTTYPE_SERVICE_MW in request_types + 
_request_type_service_tapi = RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI in request_types + _request_type_slice_l2nm = RequestTypeEnum.REQUESTTYPE_SLICE_L2NM in request_types + _request_type_slice_l3nm = RequestTypeEnum.REQUESTTYPE_SLICE_L3NM in request_types + + _offered_load = round(status.parameters.offered_load , ndigits=4) + _holding_time = round(status.parameters.holding_time , ndigits=4) + _inter_arrival_time = round(status.parameters.inter_arrival_time , ndigits=4) - if form_start.validate_on_submit(): - load_gen_params = Parameters() - load_gen_params.num_requests = form_start.num_requests.data - load_gen_params.offered_load = form_start.offered_load.data - load_gen_params.holding_time = form_start.holding_time.data - load_gen_params.inter_arrival_time = form_start.inter_arrival_time.data - load_gen_params.do_teardown = form_start.do_teardown.data - load_gen_params.dry_mode = False - load_gen_params.record_to_dlt = form_start.record_to_dlt.data - load_gen_params.dlt_domain_id = form_start.dlt_domain_id.data + form = LoadGenForm() + set_properties(form.num_requests , status.parameters.num_requests , readonly=status.running) + set_properties(form.offered_load , _offered_load , readonly=status.running) + set_properties(form.holding_time , _holding_time , readonly=status.running) + set_properties(form.inter_arrival_time , _inter_arrival_time , readonly=status.running) + set_properties(form.do_teardown , status.parameters.do_teardown , disabled=status.running) + set_properties(form.record_to_dlt , status.parameters.record_to_dlt, disabled=status.running) + set_properties(form.dlt_domain_id , status.parameters.dlt_domain_id, readonly=status.running) + set_properties(form.request_type_service_l2nm, _request_type_service_l2nm , disabled=status.running) + set_properties(form.request_type_service_l3nm, _request_type_service_l3nm , disabled=status.running) + set_properties(form.request_type_service_mw , _request_type_service_mw , disabled=status.running) + 
set_properties(form.request_type_service_tapi, _request_type_service_tapi , disabled=status.running) + set_properties(form.request_type_slice_l2nm , _request_type_slice_l2nm , disabled=status.running) + set_properties(form.request_type_slice_l3nm , _request_type_slice_l3nm , disabled=status.running) + set_properties(form.num_generated , status.num_generated , disabled=True) + set_properties(form.infinite_loop , status.infinite_loop , disabled=True) + set_properties(form.running , status.running , disabled=True) - del load_gen_params.request_types[:] # pylint: disable=no-member - request_types = list() - if form_start.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM) - if form_start.request_type_service_l3nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM) - if form_start.request_type_service_mw .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_MW ) - if form_start.request_type_service_tapi.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI) - if form_start.request_type_slice_l2nm .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L2NM ) - if form_start.request_type_slice_l3nm .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L3NM ) - load_gen_params.request_types.extend(request_types) # pylint: disable=no-member + form.submit.label.text = 'Stop' if status.running else 'Start' + form_action = url_for('load_gen.stop') if status.running else url_for('load_gen.start') + return render_template('load_gen/home.html', form=form, form_action=form_action) +@load_gen.route('start', methods=['POST']) +def start(): + form = LoadGenForm() + if form.validate_on_submit(): try: + load_gen_params = Parameters() + load_gen_params.num_requests = form.num_requests.data + load_gen_params.offered_load = form.offered_load.data + load_gen_params.holding_time = form.holding_time.data + load_gen_params.inter_arrival_time = form.inter_arrival_time.data + 
load_gen_params.do_teardown = form.do_teardown.data + load_gen_params.dry_mode = False + load_gen_params.record_to_dlt = form.record_to_dlt.data + load_gen_params.dlt_domain_id = form.dlt_domain_id.data + + del load_gen_params.request_types[:] # pylint: disable=no-member + request_types = list() + if form.request_type_service_l2nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM) + if form.request_type_service_l3nm.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM) + if form.request_type_service_mw .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_MW ) + if form.request_type_service_tapi.data: request_types.append(RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI) + if form.request_type_slice_l2nm .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L2NM ) + if form.request_type_slice_l3nm .data: request_types.append(RequestTypeEnum.REQUESTTYPE_SLICE_L3NM ) + load_gen_params.request_types.extend(request_types) # pylint: disable=no-member + + load_gen_client = LoadGeneratorClient() load_gen_client.connect() load_gen_client.Start(load_gen_params) load_gen_client.close() flash('Load Generator Started.', 'success') except Exception as e: # pylint: disable=broad-except flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger') + return redirect(url_for('load_gen.home')) - if form_stop.validate_on_submit(): +@load_gen.route('stop', methods=['POST']) +def stop(): + form = LoadGenForm() + if form.validate_on_submit(): try: + load_gen_client = LoadGeneratorClient() load_gen_client.connect() load_gen_client.Stop(Empty()) load_gen_client.close() flash('Load Generator Stopped.', 'success') except Exception as e: # pylint: disable=broad-except flash('Problem stopping Load Generator. 
{:s}'.format(str(e)), 'danger') - - load_gen_client.connect() - status = load_gen_client.GetStatus(Empty()) - load_gen_client.close() - - form_start.num_requests .default = status.parameters.num_requests - form_start.offered_load .default = status.parameters.offered_load - form_start.holding_time .default = status.parameters.holding_time - form_start.inter_arrival_time.default = status.parameters.inter_arrival_time - form_start.do_teardown .default = status.parameters.do_teardown - form_start.record_to_dlt .default = status.parameters.record_to_dlt - form_start.dlt_domain_id .default = status.parameters.dlt_domain_id - form_start.num_generated .default = status.num_generated - form_start.infinite_loop .default = status.infinite_loop - form_start.running .default = status.running - - request_types = status.parameters.request_types - form_start.request_type_service_l2nm.default = RequestTypeEnum.REQUESTTYPE_SERVICE_L2NM in request_types - form_start.request_type_service_l3nm.default = RequestTypeEnum.REQUESTTYPE_SERVICE_L3NM in request_types - form_start.request_type_service_mw .default = RequestTypeEnum.REQUESTTYPE_SERVICE_MW in request_types - form_start.request_type_service_tapi.default = RequestTypeEnum.REQUESTTYPE_SERVICE_TAPI in request_types - form_start.request_type_slice_l2nm .default = RequestTypeEnum.REQUESTTYPE_SLICE_L2NM in request_types - form_start.request_type_slice_l3nm .default = RequestTypeEnum.REQUESTTYPE_SLICE_L3NM in request_types - - make_read_only(form_start.num_requests , status.running) - make_read_only(form_start.offered_load , status.running) - make_read_only(form_start.holding_time , status.running) - make_read_only(form_start.inter_arrival_time , status.running) - make_read_only(form_start.do_teardown , status.running) - make_read_only(form_start.record_to_dlt , status.running) - make_read_only(form_start.dlt_domain_id , status.running) - make_read_only(form_start.request_type_service_l2nm, status.running) - 
make_read_only(form_start.request_type_service_l3nm, status.running) - make_read_only(form_start.request_type_service_mw , status.running) - make_read_only(form_start.request_type_service_tapi, status.running) - make_read_only(form_start.request_type_slice_l2nm , status.running) - make_read_only(form_start.request_type_slice_l3nm , status.running) - - return render_template('load_gen/home.html', form_start=form_start, form_stop=form_stop, is_running=status.running) + return redirect(url_for('load_gen.home')) diff --git a/src/webui/service/templates/load_gen/home.html b/src/webui/service/templates/load_gen/home.html index 684e60ed5..d58f42601 100644 --- a/src/webui/service/templates/load_gen/home.html +++ b/src/webui/service/templates/load_gen/home.html @@ -20,36 +20,34 @@

Load Generator


-
- {% if not is_running %} - {{ form_start.hidden_tag() }} - {% endif %} + + {{ form.hidden_tag() }}
- {{ form_start.num_requests.label(class="col-sm-2 col-form-label") }} + {{ form.num_requests.label(class="col-sm-2 col-form-label") }}
- {% if form_start.num_requests.errors %} - {{ form_start.num_requests(class="form-control is-invalid") }} + {% if form.num_requests.errors %} + {{ form.num_requests(class="form-control is-invalid") }}
- {% for error in form_start.num_requests.errors %}{{ error }}{% endfor %} + {% for error in form.num_requests.errors %}{{ error }}{% endfor %}
{% else %} - {{ form_start.num_requests(class="form-control") }} + {{ form.num_requests(class="form-control") }} {% endif %}

- {{ form_start.num_generated.label(class="col-sm-2 col-form-label") }} + {{ form.num_generated.label(class="col-sm-2 col-form-label") }}
- {% if form_start.num_generated.errors %} - {{ form_start.num_generated(class="form-control is-invalid") }} + {% if form.num_generated.errors %} + {{ form.num_generated(class="form-control is-invalid") }}
- {% for error in form_start.num_generated.errors %}{{ error }}{% endfor %} + {% for error in form.num_generated.errors %}{{ error }}{% endfor %}
{% else %} - {{ form_start.num_generated(class="form-control") }} + {{ form.num_generated(class="form-control") }} {% endif %}
@@ -58,58 +56,58 @@
Service Types:
- {{ form_start.request_type_slice_l2nm }} {{ form_start.request_type_slice_l2nm .label(class="col-sm-3 col-form-label") }} - {{ form_start.request_type_slice_l3nm }} {{ form_start.request_type_slice_l3nm .label(class="col-sm-3 col-form-label") }} + {{ form.request_type_slice_l2nm }} {{ form.request_type_slice_l2nm .label(class="col-sm-3 col-form-label") }} + {{ form.request_type_slice_l3nm }} {{ form.request_type_slice_l3nm .label(class="col-sm-3 col-form-label") }}
- {{ form_start.request_type_service_l2nm }} {{ form_start.request_type_service_l2nm.label(class="col-sm-3 col-form-label") }} - {{ form_start.request_type_service_l3nm }} {{ form_start.request_type_service_l3nm.label(class="col-sm-3 col-form-label") }} + {{ form.request_type_service_l2nm }} {{ form.request_type_service_l2nm.label(class="col-sm-3 col-form-label") }} + {{ form.request_type_service_l3nm }} {{ form.request_type_service_l3nm.label(class="col-sm-3 col-form-label") }}
- {{ form_start.request_type_service_mw }} {{ form_start.request_type_service_mw .label(class="col-sm-3 col-form-label") }} - {{ form_start.request_type_service_tapi }} {{ form_start.request_type_service_tapi.label(class="col-sm-3 col-form-label") }} + {{ form.request_type_service_mw }} {{ form.request_type_service_mw .label(class="col-sm-3 col-form-label") }} + {{ form.request_type_service_tapi }} {{ form.request_type_service_tapi.label(class="col-sm-3 col-form-label") }}

- {{ form_start.offered_load.label(class="col-sm-2 col-form-label") }} + {{ form.offered_load.label(class="col-sm-2 col-form-label") }}
- {% if form_start.offered_load.errors %} - {{ form_start.offered_load(class="form-control is-invalid") }} + {% if form.offered_load.errors %} + {{ form.offered_load(class="form-control is-invalid") }}
- {% for error in form_start.offered_load.errors %}{{ error }}{% endfor %} + {% for error in form.offered_load.errors %}{{ error }}{% endfor %}
{% else %} - {{ form_start.offered_load(class="form-control") }} + {{ form.offered_load(class="form-control") }} {% endif %}

- {{ form_start.holding_time.label(class="col-sm-2 col-form-label") }} + {{ form.holding_time.label(class="col-sm-2 col-form-label") }}
- {% if form_start.holding_time.errors %} - {{ form_start.holding_time(class="form-control is-invalid") }} + {% if form.holding_time.errors %} + {{ form.holding_time(class="form-control is-invalid") }}
- {% for error in form_start.holding_time.errors %}{{ error }}{% endfor %} + {% for error in form.holding_time.errors %}{{ error }}{% endfor %}
{% else %} - {{ form_start.holding_time(class="form-control") }} + {{ form.holding_time(class="form-control") }} {% endif %}

- {{ form_start.inter_arrival_time.label(class="col-sm-2 col-form-label") }} + {{ form.inter_arrival_time.label(class="col-sm-2 col-form-label") }}
- {% if form_start.inter_arrival_time.errors %} - {{ form_start.inter_arrival_time(class="form-control is-invalid") }} + {% if form.inter_arrival_time.errors %} + {{ form.inter_arrival_time(class="form-control is-invalid") }}
- {% for error in form_start.inter_arrival_time.errors %}{{ error }}{% endfor %} + {% for error in form.inter_arrival_time.errors %}{{ error }}{% endfor %}
{% else %} - {{ form_start.inter_arrival_time(class="form-control") }} + {{ form.inter_arrival_time(class="form-control") }} {% endif %}
@@ -117,7 +115,7 @@
- {{ form_start.do_teardown }} {{ form_start.do_teardown.label(class="col-sm-3 col-form-label") }}
+ {{ form.do_teardown }} {{ form.do_teardown.label(class="col-sm-3 col-form-label") }}

@@ -125,15 +123,15 @@
DLT Settings:
- {{ form_start.record_to_dlt }} {{ form_start.record_to_dlt.label(class="col-sm-3 col-form-label") }}
- {{ form_start.dlt_domain_id.label(class="col-sm-2 col-form-label") }} - {% if form_start.dlt_domain_id.errors %} - {{ form_start.dlt_domain_id(class="form-control is-invalid") }} + {{ form.record_to_dlt }} {{ form.record_to_dlt.label(class="col-sm-3 col-form-label") }}
+ {{ form.dlt_domain_id.label(class="col-sm-2 col-form-label") }} + {% if form.dlt_domain_id.errors %} + {{ form.dlt_domain_id(class="form-control is-invalid") }}
- {% for error in form_start.dlt_domain_id.errors %}{{ error }}{% endfor %} + {% for error in form.dlt_domain_id.errors %}{{ error }}{% endfor %}
{% else %} - {{ form_start.dlt_domain_id(class="form-control") }} + {{ form.dlt_domain_id(class="form-control") }} {% endif %}
@@ -142,29 +140,16 @@
Status:
- {{ form_start.infinite_loop }} {{ form_start.infinite_loop.label(class="col-sm-3 col-form-label") }} - {{ form_start.running }} {{ form_start.running.label(class="col-sm-3 col-form-label") }} + {{ form.infinite_loop }} {{ form.infinite_loop.label(class="col-sm-3 col-form-label") }} + {{ form.running }} {{ form.running.label(class="col-sm-3 col-form-label") }}

- {% if not is_running %}
- {{ form_start.submit(class="btn btn-primary") }} + {{ form.submit(class="btn btn-primary") }}
- {% endif %}
- {% if is_running %} -
- {{ form_stop.hidden_tag() }} -
-
- {{ form_stop.submit(class="btn btn-primary") }} -
-
-
- {% endif %} - {% endblock %} -- GitLab From 2333c8fa0090e5dab330eacf2cd885c5529818d1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 15:05:28 +0000 Subject: [PATCH 17/34] Slice component: - Reduced log level in MetricsExporter --- src/slice/service/slice_grouper/MetricsExporter.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/slice/service/slice_grouper/MetricsExporter.py b/src/slice/service/slice_grouper/MetricsExporter.py index ce3d88b80..3708641ee 100644 --- a/src/slice/service/slice_grouper/MetricsExporter.py +++ b/src/slice/service/slice_grouper/MetricsExporter.py @@ -82,7 +82,7 @@ class MetricsExporter(): with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender: sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp) sender.flush() - LOGGER.info(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns))) + LOGGER.debug(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns))) return except (Exception, IngressError): # pylint: disable=broad-except LOGGER.exception(MSG_EXPORT_FAILED.format( @@ -95,7 +95,7 @@ class MetricsExporter(): try: result = self.rest_request(sql_query) if not result: raise Exception - LOGGER.info('Point {:s} deleted'.format(str(slice_uuid))) + LOGGER.debug('Point {:s} deleted'.format(str(slice_uuid))) except Exception as e: LOGGER.warning('Point {:s} cannot be deleted. 
{:s}'.format(str(slice_uuid), str(e))) raise @@ -114,10 +114,10 @@ class MetricsExporter(): json_response = response.json() if 'ddl' in json_response: - LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl']))) + LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl']))) return True elif 'dataset' in json_response: - LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset']))) + LOGGER.debug(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset']))) return json_response['dataset'] except Exception: # pylint: disable=broad-except -- GitLab From d34a33b9a5d5641d9f358e6eb6ba9a38b7b32834 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 16:54:37 +0000 Subject: [PATCH 18/34] Slice component: - Added old test files --- src/slice/tests/old/Main.py | 98 +++++++++++++++++++++ src/slice/tests/old/MetricsExporter.py | 116 +++++++++++++++++++++++++ src/slice/tests/old/test_kmeans.py | 77 ++++++++++++++++ src/slice/tests/old/test_subslices.py | 96 ++++++++++++++++++++ 4 files changed, 387 insertions(+) create mode 100644 src/slice/tests/old/Main.py create mode 100644 src/slice/tests/old/MetricsExporter.py create mode 100644 src/slice/tests/old/test_kmeans.py create mode 100644 src/slice/tests/old/test_subslices.py diff --git a/src/slice/tests/old/Main.py b/src/slice/tests/old/Main.py new file mode 100644 index 000000000..0924f1c64 --- /dev/null +++ b/src/slice/tests/old/Main.py @@ -0,0 +1,98 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os, pandas, random, sys, time +#from matplotlib import pyplot as plt +from sklearn.cluster import KMeans +from typing import Dict, List, Tuple + +os.environ['METRICSDB_HOSTNAME' ] = '127.0.0.1' #'questdb-public.qdb.svc.cluster.local' +os.environ['METRICSDB_ILP_PORT' ] = '9009' +os.environ['METRICSDB_REST_PORT'] = '9000' + +from .MetricsExporter import MetricsExporter # pylint: disable=wrong-import-position + +logging.basicConfig(level=logging.DEBUG) +LOGGER : logging.Logger = logging.getLogger(__name__) + +def get_random_slices(count : int) -> List[Tuple[str, float, float]]: + slices = list() + for i in range(count): + slice_name = 'slice-{:03d}'.format(i) + slice_availability = random.uniform(00.0, 99.99) + slice_capacity_gbps = random.uniform(0.1, 100.0) + slices.append((slice_name, slice_availability, slice_capacity_gbps)) + return slices + +def init_kmeans() -> Tuple[KMeans, Dict[str, int]]: + groups = [ + # Name, avail[0..100], bw_gbps[0..100] + ('bronze', 10.0, 10.0), # ('silver', 25.0, 25.0), + ('silver', 30.0, 40.0), # ('silver', 25.0, 25.0), + ('gold', 70.0, 50.0), # ('gold', 90.0, 50.0), + ('platinum', 99.0, 100.0), + ] + df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity']) + + num_clusters = len(groups) + k_means = KMeans(n_clusters=num_clusters) + k_means.fit(df_groups[['availability', 'capacity']]) + + df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity']]) + mapping = { + group['name']:{k:v for k,v in group.items() if k != 'name'} + for group in 
list(df_groups.to_dict('records')) + } + + return k_means, mapping + +def main(): + LOGGER.info('Starting...') + metrics_exporter = MetricsExporter() + metrics_exporter.create_table() + + k_means, mapping = init_kmeans() + label_to_group = {} + for group_name,group_attrs in mapping.items(): + label = group_attrs['label'] + availability = group_attrs['availability'] + capacity = group_attrs['capacity'] + metrics_exporter.export_point(group_name, group_name, availability, capacity, is_center=True) + label_to_group[label] = group_name + + slices = get_random_slices(10000) + for slice_ in slices: + sample = pandas.DataFrame([slice_[1:3]], columns=['availability', 'capacity']) + sample['label'] = k_means.predict(sample) + sample = sample.to_dict('records')[0] + label = sample['label'] + availability = sample['availability'] + capacity = sample['capacity'] + group_name = label_to_group[label] + metrics_exporter.export_point(slice_[0], group_name, availability, capacity, is_center=False) + time.sleep(0.01) + + #df_silver = df_slices[df_slices['group']==mapping['silver']] + #df_gold = df_slices[df_slices['group']==mapping['gold']] + #df_platinum = df_slices[df_slices['group']==mapping['platinum']] + #plt.scatter(df_silver.availability, df_silver.capacity, s=25, c='black' ) + #plt.scatter(df_gold.availability, df_gold.capacity, s=25, c='gold' ) + #plt.scatter(df_platinum.availability, df_platinum.capacity, s=25, c='silver') + #plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red' ) + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/slice/tests/old/MetricsExporter.py b/src/slice/tests/old/MetricsExporter.py new file mode 100644 index 000000000..3c04cb9fc --- /dev/null +++ b/src/slice/tests/old/MetricsExporter.py @@ -0,0 +1,116 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime, logging, os, requests +from typing import Any, Literal, Union +from questdb.ingress import Sender, IngressError # pylint: disable=no-name-in-module + +LOGGER = logging.getLogger(__name__) + +MAX_RETRIES = 10 +DELAY_RETRIES = 0.5 + +MSG_EXPORT_EXECUTED = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) executed' +MSG_EXPORT_FAILED = '[rest_request] Export(timestamp={:s}, symbols={:s}, columns={:s}) failed, retry={:d}/{:d}...' +MSG_REST_BAD_STATUS = '[rest_request] Bad Reply url="{:s}" params="{:s}": status_code={:d} content={:s}' +MSG_REST_EXECUTED = '[rest_request] Query({:s}) executed, result: {:s}' +MSG_REST_FAILED = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...' 
+MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}' + +METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME') +METRICSDB_ILP_PORT = int(os.environ.get('METRICSDB_ILP_PORT')) +METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT')) +METRICSDB_TABLE_SLICE_GROUPS = 'slice_groups' + +COLORS = { + 'platinum': '#E5E4E2', + 'gold' : '#FFD700', + 'silver' : '#808080', + 'bronze' : '#CD7F32', +} +DEFAULT_COLOR = '#000000' # black + +class MetricsExporter(): + def __init__(self) -> None: + pass + + def create_table(self) -> None: + sql_query = ' '.join([ + 'CREATE TABLE IF NOT EXISTS {:s} ('.format(str(METRICSDB_TABLE_SLICE_GROUPS)), + ','.join([ + 'timestamp TIMESTAMP', + 'slice_uuid SYMBOL', + 'slice_group SYMBOL', + 'slice_color SYMBOL', + 'slice_availability DOUBLE', + 'slice_capacity_center DOUBLE', + 'slice_capacity DOUBLE', + ]), + ') TIMESTAMP(timestamp);' + ]) + try: + result = self.rest_request(sql_query) + if not result: raise Exception + LOGGER.info('Table {:s} created'.format(str(METRICSDB_TABLE_SLICE_GROUPS))) + except Exception as e: + LOGGER.warning('Table {:s} cannot be created. 
{:s}'.format(str(METRICSDB_TABLE_SLICE_GROUPS), str(e))) + raise + + def export_point( + self, slice_uuid : str, slice_group : str, slice_availability : float, slice_capacity : float, + is_center : bool = False + ) -> None: + dt_timestamp = datetime.datetime.utcnow() + slice_color = COLORS.get(slice_group, DEFAULT_COLOR) + symbols = dict(slice_uuid=slice_uuid, slice_group=slice_group, slice_color=slice_color) + columns = dict(slice_availability=slice_availability) + columns['slice_capacity_center' if is_center else 'slice_capacity'] = slice_capacity + + for retry in range(MAX_RETRIES): + try: + with Sender(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT) as sender: + sender.row(METRICSDB_TABLE_SLICE_GROUPS, symbols=symbols, columns=columns, at=dt_timestamp) + sender.flush() + LOGGER.info(MSG_EXPORT_EXECUTED.format(str(dt_timestamp), str(symbols), str(columns))) + return + except (Exception, IngressError): # pylint: disable=broad-except + LOGGER.exception(MSG_EXPORT_FAILED.format( + str(dt_timestamp), str(symbols), str(columns), retry+1, MAX_RETRIES)) + + raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES)) + + def rest_request(self, rest_query : str) -> Union[Any, Literal[True]]: + url = 'http://{:s}:{:d}/exec'.format(METRICSDB_HOSTNAME, METRICSDB_REST_PORT) + params = {'query': rest_query, 'fmt': 'json'} + + for retry in range(MAX_RETRIES): + try: + response = requests.get(url, params=params) + status_code = response.status_code + if status_code not in {200}: + str_content = response.content.decode('UTF-8') + raise Exception(MSG_REST_BAD_STATUS.format(str(url), str(params), status_code, str_content)) + + json_response = response.json() + if 'ddl' in json_response: + LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['ddl']))) + return True + elif 'dataset' in json_response: + LOGGER.info(MSG_REST_EXECUTED.format(str(rest_query), str(json_response['dataset']))) + return json_response['dataset'] + + except Exception: # pylint: disable=broad-except 
+ LOGGER.exception(MSG_REST_FAILED.format(str(rest_query), retry+1, MAX_RETRIES)) + + raise Exception(MSG_ERROR_MAX_RETRIES.format(MAX_RETRIES)) diff --git a/src/slice/tests/old/test_kmeans.py b/src/slice/tests/old/test_kmeans.py new file mode 100644 index 000000000..3f54621c5 --- /dev/null +++ b/src/slice/tests/old/test_kmeans.py @@ -0,0 +1,77 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import pandas, random, sys +from matplotlib import pyplot as plt +from sklearn.cluster import KMeans +from typing import Dict, List, Tuple + +def get_random_slices(count : int) -> List[Tuple[str, float, float]]: + slices = list() + for i in range(count): + slice_name = 'slice-{:03d}'.format(i) + slice_availability = random.uniform(00.0, 99.99) + slice_capacity_gbps = random.uniform(0.1, 100.0) + slices.append((slice_name, slice_availability, slice_capacity_gbps)) + return slices + +def init_kmeans() -> Tuple[KMeans, Dict[str, int]]: + groups = [ + # Name, avail[0..100], bw_gbps[0..100] + ('silver', 25.0, 50.0), # ('silver', 25.0, 25.0), + ('gold', 90.0, 10.0), # ('gold', 90.0, 50.0), + ('platinum', 99.0, 100.0), + ] + df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity']) + + num_clusters = len(groups) + k_means = KMeans(n_clusters=num_clusters) + k_means.fit(df_groups[['availability', 'capacity']]) + + df_groups['label'] = k_means.predict(df_groups[['availability', 
'capacity']]) + mapping = {group['name']:group['label'] for group in list(df_groups.to_dict('records'))} + + return k_means, mapping + +def main(): + k_means, mapping = init_kmeans() + slices = get_random_slices(500) + df_slices = pandas.DataFrame(slices, columns=['slice_uuid', 'availability', 'capacity']) + + # predict one + #sample = df_slices[['availability', 'capacity']].iloc[[0]] + #y_predicted = k_means.predict(sample) + #y_predicted + + df_slices['group'] = k_means.predict(df_slices[['availability', 'capacity']]) + + df_silver = df_slices[df_slices['group']==mapping['silver']] + df_gold = df_slices[df_slices['group']==mapping['gold']] + df_platinum = df_slices[df_slices['group']==mapping['platinum']] + + plt.scatter(df_silver.availability, df_silver.capacity, s=25, c='black' ) + plt.scatter(df_gold.availability, df_gold.capacity, s=25, c='gold' ) + plt.scatter(df_platinum.availability, df_platinum.capacity, s=25, c='silver') + plt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], s=100, c='red' ) + plt.xlabel('service-slo-availability') + plt.ylabel('service-slo-one-way-bandwidth') + #ax = plt.subplot(1, 1, 1) + #ax.set_ylim(bottom=0., top=1.) + #ax.set_xlim(left=0.) + plt.savefig('slice_grouping.png') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/slice/tests/old/test_subslices.py b/src/slice/tests/old/test_subslices.py new file mode 100644 index 000000000..39ee235df --- /dev/null +++ b/src/slice/tests/old/test_subslices.py @@ -0,0 +1,96 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import sqlalchemy, sys +from sqlalchemy import Column, ForeignKey, String, event, insert +from sqlalchemy.orm import Session, declarative_base, relationship +from typing import Dict + +def _fk_pragma_on_connect(dbapi_con, con_record): + dbapi_con.execute('pragma foreign_keys=ON') + +_Base = declarative_base() + +class SliceModel(_Base): + __tablename__ = 'slice' + + slice_uuid = Column(String, primary_key=True) + + slice_subslices = relationship( + 'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid') + + def dump_id(self) -> Dict: + return {'uuid': self.slice_uuid} + + def dump(self) -> Dict: + return { + 'slice_id': self.dump_id(), + 'slice_subslice_ids': [ + slice_subslice.subslice.dump_id() + for slice_subslice in self.slice_subslices + ] + } + +class SliceSubSliceModel(_Base): + __tablename__ = 'slice_subslice' + + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) + subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True) + + slice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined') + subslice = relationship('SliceModel', foreign_keys='SliceSubSliceModel.subslice_uuid', lazy='joined') + +def main(): + engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=False, future=True) + event.listen(engine, 'connect', _fk_pragma_on_connect) + + _Base.metadata.create_all(engine) + + slice_data = [ + {'slice_uuid': 'slice-01'}, + {'slice_uuid': 'slice-01-01'}, + 
{'slice_uuid': 'slice-01-02'}, + ] + + slice_subslices_data = [ + {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-01'}, + {'slice_uuid': 'slice-01', 'subslice_uuid': 'slice-01-02'}, + ] + + # insert + with engine.connect() as conn: + conn.execute(insert(SliceModel).values(slice_data)) + conn.execute(insert(SliceSubSliceModel).values(slice_subslices_data)) + conn.commit() + + # read + with Session(engine) as session: + obj_list = session.query(SliceModel).all() + print([obj.dump() for obj in obj_list]) + session.commit() + + return 0 + +if __name__ == '__main__': + sys.exit(main()) + +[ + {'slice_id': {'uuid': 'slice-01'}, 'slice_subslice_ids': [ + {'uuid': 'slice-01-01'}, + {'uuid': 'slice-01-02'} + ]}, + {'slice_id': {'uuid': 'slice-01-01'}, 'slice_subslice_ids': []}, + {'slice_id': {'uuid': 'slice-01-02'}, 'slice_subslice_ids': []} +] -- GitLab From f6315c3cb2090cc7fec08ec2985eb11265331a0a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 16:55:09 +0000 Subject: [PATCH 19/34] Slice Component: - Added logic to manage endpoints, constraints and config rules in Slice Grouping --- src/slice/service/slice_grouper/Tools.py | 46 ++++++++++++++++++------ 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/src/slice/service/slice_grouper/Tools.py b/src/slice/service/slice_grouper/Tools.py index 12337cf8e..ca957f3c7 100644 --- a/src/slice/service/slice_grouper/Tools.py +++ b/src/slice/service/slice_grouper/Tools.py @@ -115,11 +115,25 @@ def add_slice_to_group(slice_obj : Slice, selected_group : Tuple[str, float, flo if slice_group_obj is None: raise NotFoundException('Slice', group_name, extra_details='while adding to group') - for subslice_id in slice_group_obj.slice_subslice_ids: - if subslice_id == slice_obj.slice_id: break # already added - else: - slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id) - # TODO: add other logic, such as re-configure parent slice + del slice_group_obj.slice_endpoint_ids[:] + for endpoint_id 
in slice_obj.slice_endpoint_ids: + slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id) + + del slice_group_obj.slice_constraints[:] + del slice_group_obj.slice_service_ids[:] + + del slice_group_obj.slice_subslice_ids[:] + slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id) + + del slice_group_obj.slice_config.config_rules[:] + for config_rule in slice_obj.slice_config.config_rules: + group_config_rule = slice_group_obj.slice_config.config_rules.add() + group_config_rule.CopyFrom(config_rule) + if config_rule.WhichOneof('config_rule') != 'custom': continue + TEMPLATE = '/subslice[{:s}]{:s}' + slice_resource_key = config_rule.custom.resource_key + group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key) + group_config_rule.custom.resource_key = group_resource_key context_client.SetSlice(slice_group_obj) @@ -139,13 +153,23 @@ def remove_slice_from_group(slice_obj : Slice, selected_group : Tuple[str, float raise NotFoundException('Slice', group_name, extra_details='while removing from group') if slice_obj.slice_id in slice_group_obj.slice_subslice_ids: - slice_group_obj.slice_subslice_ids.remove(slice_obj.slice_id) - # TODO: other logic, such as deconfigure parent slice - tmp_slice_group_obj = Slice() - tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id) # pylint: disable=no-member - slice_subslice_id = tmp_slice_group_obj.slice_subslice_ids.add() # pylint: disable=no-member - slice_subslice_id.CopyFrom(slice_obj.slice_id) + tmp_slice_group_obj.slice_id.CopyFrom(slice_group_obj.slice_id) # pylint: disable=no-member + + tmp_slice_group_obj.slice_subslice_ids.add().CopyFrom(slice_obj.slice_id) # pylint: disable=no-member + + for endpoint_id in slice_obj.slice_endpoint_ids: + tmp_slice_group_obj.slice_endpoint_ids.add().CopyFrom(endpoint_id) # pylint: disable=no-member + + for config_rule in slice_obj.slice_config.config_rules: + group_config_rule = tmp_slice_group_obj.slice_config.config_rules.add() # pylint: 
disable=no-member + group_config_rule.CopyFrom(config_rule) + if group_config_rule.WhichOneof('config_rule') != 'custom': continue + TEMPLATE = '/subslice[{:s}]{:s}' + slice_resource_key = group_config_rule.custom.resource_key + group_resource_key = TEMPLATE.format(slice_uuid, slice_resource_key) + group_config_rule.custom.resource_key = group_resource_key + context_client.UnsetSlice(tmp_slice_group_obj) metrics_exporter = MetricsExporter() -- GitLab From 96d5f39d7d777b92fbdc7434ae105ac4d5ad488a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 20 Feb 2023 18:15:14 +0000 Subject: [PATCH 20/34] WebUI component: - Reduced number of digits to 2 in SLA values --- src/webui/service/templates/service/detail.html | 2 +- src/webui/service/templates/slice/detail.html | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index e17b8539f..7d3c72a80 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -157,7 +157,7 @@ SLA Availability - - {{ constraint.sla_availability.availability }} %; + {{ round(constraint.sla_availability.availability, ndigits=2) }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 0c11744cb..6c35e6b50 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -157,7 +157,7 @@ SLA Availability - - {{ constraint.sla_availability.availability }} %; + {{ round(constraint.sla_availability.availability, ndigits=2) }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active -- GitLab From 
7a0636254985f95cdb9d12d80b14f25f3b59d2e1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:45:23 +0000 Subject: [PATCH 21/34] Path Computation component - Frontend: - Added availability percentage to SLA Availability constraint --- src/pathcomp/frontend/tests/test_unitary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathcomp/frontend/tests/test_unitary.py b/src/pathcomp/frontend/tests/test_unitary.py index 5d642cf4c..8088259b8 100644 --- a/src/pathcomp/frontend/tests/test_unitary.py +++ b/src/pathcomp/frontend/tests/test_unitary.py @@ -203,7 +203,7 @@ def test_request_service_kdisjointpath( endpoint_ids, constraints = [], [ json_constraint_sla_capacity(10.0), json_constraint_sla_latency(12.0), - json_constraint_sla_availability(2, True), + json_constraint_sla_availability(2, True, 50.0), json_constraint_custom('diversity', {'end-to-end-diverse': 'all-other-accesses'}), ] -- GitLab From 65e189a751cb559aa80a4198b108c6bd4cef9817 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:45:35 +0000 Subject: [PATCH 22/34] Load Generator component: - Added availability percentage to SLA Availability constraint --- src/load_generator/load_gen/RequestGenerator.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index e7988760b..0ada285bc 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -230,11 +230,12 @@ class RequestGenerator: ] if request_type == RequestType.SERVICE_L2NM: + availability = round(random.uniform(0.0, 99.99), ndigits=2) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ - json_constraint_sla_availability(1, True), + json_constraint_sla_availability(1, True, availability), json_constraint_sla_capacity(capacity_gbps), 
json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), json_constraint_sla_latency(e2e_latency_ms), @@ -274,11 +275,12 @@ class RequestGenerator: request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) elif request_type == RequestType.SERVICE_L3NM: + availability = round(random.uniform(0.0, 99.99), ndigits=2) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ - json_constraint_sla_availability(1, True), + json_constraint_sla_availability(1, True, availability), json_constraint_sla_capacity(capacity_gbps), json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), json_constraint_sla_latency(e2e_latency_ms), @@ -378,10 +380,11 @@ class RequestGenerator: json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid), ] + availability = round(random.uniform(0.0, 99.99), ndigits=2) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ - json_constraint_sla_availability(1, True), + json_constraint_sla_availability(1, True, availability), json_constraint_sla_capacity(capacity_gbps), json_constraint_sla_isolation([IsolationLevelEnum.NO_ISOLATION]), json_constraint_sla_latency(e2e_latency_ms), -- GitLab From b82e55e56c6a40671faf7524669cdf6a869fce95 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:45:46 +0000 Subject: [PATCH 23/34] Common - Tools - Object Factory: - Added availability percentage to SLA Availability constraint --- src/common/tools/object_factory/Constraint.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/common/tools/object_factory/Constraint.py b/src/common/tools/object_factory/Constraint.py index 0df049ab7..ef00e3872 100644 --- a/src/common/tools/object_factory/Constraint.py +++ b/src/common/tools/object_factory/Constraint.py @@ -29,9 +29,9 @@ def 
json_constraint_endpoint_location_gps(endpoint_id : Dict, latitude : float, def json_constraint_endpoint_priority(endpoint_id : Dict, priority : int) -> Dict: return {'endpoint_priority': {'endpoint_id': endpoint_id, 'priority': priority}} -def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool) -> Dict: +def json_constraint_sla_availability(num_disjoint_paths : int, all_active : bool, availability : float) -> Dict: return {'sla_availability': { - 'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active + 'num_disjoint_paths': num_disjoint_paths, 'all_active': all_active, 'availability': availability }} def json_constraint_sla_capacity(capacity_gbps : float) -> Dict: -- GitLab From b9d8455c8ad4bf8985f080a10fe0cae318ff97d9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 10:51:43 +0000 Subject: [PATCH 24/34] Common - Tools - gRPC Helpers: - Added availability percentage to SLA Availability constraint --- src/common/tools/grpc/Constraints.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/common/tools/grpc/Constraints.py b/src/common/tools/grpc/Constraints.py index f33e7b1ef..07f0b7782 100644 --- a/src/common/tools/grpc/Constraints.py +++ b/src/common/tools/grpc/Constraints.py @@ -160,7 +160,7 @@ def update_constraint_sla_latency(constraints, e2e_latency_ms : float) -> Constr return constraint def update_constraint_sla_availability( - constraints, num_disjoint_paths : int, all_active : bool + constraints, num_disjoint_paths : int, all_active : bool, availability : float ) -> Constraint: for constraint in constraints: if constraint.WhichOneof('constraint') != 'sla_availability': continue @@ -171,6 +171,7 @@ def update_constraint_sla_availability( constraint.sla_availability.num_disjoint_paths = num_disjoint_paths constraint.sla_availability.all_active = all_active + constraint.sla_availability.availability = availability return constraint def update_constraint_sla_isolation(constraints, 
isolation_levels : List[int]) -> Constraint: @@ -239,7 +240,8 @@ def copy_constraints(source_constraints, target_constraints): sla_availability = source_constraint.sla_availability num_disjoint_paths = sla_availability.num_disjoint_paths all_active = sla_availability.all_active - update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active) + availability = sla_availability.availability + update_constraint_sla_availability(target_constraints, num_disjoint_paths, all_active, availability) elif constraint_kind == 'sla_isolation': sla_isolation = source_constraint.sla_isolation -- GitLab From f2f26f828537c3da06ac3c335f2a70061debd814 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 11:19:58 +0000 Subject: [PATCH 25/34] Slice component: - Added close connection to enable load balancing in requests --- src/slice/service/SliceServiceServicerImpl.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index 717127a00..acec3ae30 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -64,7 +64,9 @@ class SliceServiceServicerImpl(SliceServiceServicer): # unable to identify the kind of slice; just update endpoints, constraints and config rules # update the slice in database, and return # pylint: disable=no-member - return context_client.SetSlice(slice_rw) + reply = context_client.SetSlice(slice_rw) + context_client.close() + return reply slice_with_uuids = context_client.GetSlice(slice_id_with_uuids) @@ -82,10 +84,12 @@ class SliceServiceServicerImpl(SliceServiceServicer): slice_active.CopyFrom(slice_) slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member context_client.SetSlice(slice_active) + interdomain_client.close() + context_client.close() return slice_id if self._slice_grouper.is_enabled: - grouped = 
self._slice_grouper.group(slice_with_uuids) + grouped = self._slice_grouper.group(slice_with_uuids) # pylint: disable=unused-variable # Local domain slice service_id = ServiceId() @@ -159,6 +163,9 @@ class SliceServiceServicerImpl(SliceServiceServicer): slice_active.CopyFrom(slice_) slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member context_client.SetSlice(slice_active) + + service_client.close() + context_client.close() return slice_id @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) @@ -195,6 +202,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): try: _slice = context_client.GetSlice(request) except: # pylint: disable=bare-except + context_client.close() return Empty() if is_multi_domain(context_client, _slice.slice_endpoint_ids): @@ -208,7 +216,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client.SetSlice(current_slice) if self._slice_grouper.is_enabled: - ungrouped = self._slice_grouper.ungroup(current_slice) + ungrouped = self._slice_grouper.ungroup(current_slice) # pylint: disable=unused-variable service_client = ServiceClient() for service_id in _slice.slice_service_ids: @@ -219,6 +227,8 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client.UnsetSlice(current_slice) service_client.DeleteService(service_id) + service_client.close() context_client.RemoveSlice(request) + context_client.close() return Empty() -- GitLab From 09036cdf24248d571bde520ca0db0fae14f0b21c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 12:52:26 +0000 Subject: [PATCH 26/34] Context component: - Added indexes recommended by Cockroach internal planner to boost performance --- src/context/service/database/models/_Base.py | 39 +++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/src/context/service/database/models/_Base.py b/src/context/service/database/models/_Base.py index 4323fb713..973f7b7be 100644 --- 
a/src/context/service/database/models/_Base.py +++ b/src/context/service/database/models/_Base.py @@ -13,10 +13,47 @@ # limitations under the License. import sqlalchemy -from sqlalchemy.orm import declarative_base +from typing import Any, List +from sqlalchemy.orm import Session, sessionmaker, declarative_base +from sqlalchemy.sql import text +from sqlalchemy_cockroachdb import run_transaction _Base = declarative_base() +def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None: + def index_storing( + index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str] + ) -> Any: + str_index_fields = ','.join(['"{:s}"'.format(index_field) for index_field in index_fields]) + str_storing_fields = ','.join(['"{:s}"'.format(storing_field) for storing_field in storing_fields]) + INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});' + return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields)) + + statements = [ + index_storing('configrule_device_uuid_rec_idx', 'configrule', ['device_uuid'], [ + 'service_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at' + ]), + index_storing('configrule_service_uuid_rec_idx', 'configrule', ['service_uuid'], [ + 'device_uuid', 'slice_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at' + ]), + index_storing('configrule_slice_uuid_rec_idx', 'configrule', ['slice_uuid'], [ + 'device_uuid', 'service_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at' + ]), + index_storing('constraint_service_uuid_rec_idx', 'constraint', ['service_uuid'], [ + 'slice_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at' + ]), + index_storing('constraint_slice_uuid_rec_idx', 'constraint', ['slice_uuid'], [ + 'service_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at' + ]), + index_storing('endpoint_device_uuid_rec_idx', 'endpoint', ['device_uuid'], [ + 
'topology_uuid', 'name', 'endpoint_type', 'kpi_sample_types', 'created_at', 'updated_at' + ]), + ] + def callback(session : Session) -> bool: + for stmt in statements: session.execute(stmt) + run_transaction(sessionmaker(bind=db_engine), callback) + def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False): if drop_if_exists: _Base.metadata.drop_all(db_engine) _Base.metadata.create_all(db_engine) + create_performance_enhancers(db_engine) -- GitLab From f9d7878295f5454c29c4b283ef4982fc3f8ce006 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 13:15:31 +0000 Subject: [PATCH 27/34] Slice component: - Disabled slice grouping by default --- manifests/sliceservice.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index e5757874b..49e2b5943 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -38,7 +38,7 @@ spec: - name: LOG_LEVEL value: "INFO" - name: SLICE_GROUPING - value: "ENABLE" + value: "DISABLE" envFrom: - secretRef: name: qdb-data -- GitLab From fcbe36414e0f29a2a8a49589bfeec2419b66e4be Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 14:24:46 +0000 Subject: [PATCH 28/34] WebUI component: - Updated number of 9's for availability --- src/webui/service/templates/service/detail.html | 2 +- src/webui/service/templates/slice/detail.html | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index 7d3c72a80..bee2e93c5 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -157,7 +157,7 @@ SLA Availability - - {{ round(constraint.sla_availability.availability, ndigits=2) }} %; + {{ round(constraint.sla_availability.availability, ndigits=5) }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if 
constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 6c35e6b50..8f223e44d 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -157,7 +157,7 @@ SLA Availability - - {{ round(constraint.sla_availability.availability, ndigits=2) }} %; + {{ round(constraint.sla_availability.availability, ndigits=5) }} %; {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active -- GitLab From 2037276f55e667faff7628a4ddd85b5e22aca942 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 14:25:00 +0000 Subject: [PATCH 29/34] Load Generator component: - Updated number of 9's for availability --- src/load_generator/load_gen/RequestGenerator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index 0ada285bc..a70032dc6 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -230,7 +230,7 @@ class RequestGenerator: ] if request_type == RequestType.SERVICE_L2NM: - availability = round(random.uniform(0.0, 99.99), ndigits=2) + availability = round(random.uniform(0.0, 99.99), ndigits=5) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) @@ -275,7 +275,7 @@ class RequestGenerator: request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) elif request_type == RequestType.SERVICE_L3NM: - availability = round(random.uniform(0.0, 99.99), ndigits=2) + availability = round(random.uniform(0.0, 99.99), ndigits=5) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 
100.00), ndigits=2) @@ -380,7 +380,7 @@ class RequestGenerator: json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid), ] - availability = round(random.uniform(0.0, 99.99), ndigits=2) + availability = round(random.uniform(0.0, 99.99), ndigits=5) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ -- GitLab From 49ce0990a1bbb0fbee8b58eff2261b3962067f00 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 14:39:48 +0000 Subject: [PATCH 30/34] Compute component - IETF L2VPN: - Added new field availability in SLA Availability update --- .../nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 1a8936ed4..b89fa2207 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -113,7 +113,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id) num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()} num_disjoint_paths = min(num_endpoints_per_location) - update_constraint_sla_availability(constraints, num_disjoint_paths, all_active) + update_constraint_sla_availability(constraints, num_disjoint_paths, all_active, 0.0) return target -- GitLab From 8c92eafbf45af038c46e2d0dd341d406fbc7b62c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 14:49:00 +0000 Subject: [PATCH 31/34] Load Generator component: - Updated number of 9's for availability --- src/load_generator/load_gen/RequestGenerator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) 
diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index a70032dc6..e94dc0cb9 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -230,7 +230,7 @@ class RequestGenerator: ] if request_type == RequestType.SERVICE_L2NM: - availability = round(random.uniform(0.0, 99.99), ndigits=5) + availability = round(random.uniform(0.0, 99.9999), ndigits=5) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) @@ -275,7 +275,7 @@ class RequestGenerator: request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) elif request_type == RequestType.SERVICE_L3NM: - availability = round(random.uniform(0.0, 99.99), ndigits=5) + availability = round(random.uniform(0.0, 99.9999), ndigits=5) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) @@ -380,7 +380,7 @@ class RequestGenerator: json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid), ] - availability = round(random.uniform(0.0, 99.99), ndigits=5) + availability = round(random.uniform(0.0, 99.9999), ndigits=5) capacity_gbps = round(random.uniform(0.1, 100.00), ndigits=2) e2e_latency_ms = round(random.uniform(5.0, 100.00), ndigits=2) constraints = [ -- GitLab From ae9844ff44f750dac046a430c4f871b5da3de114 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 14:53:30 +0000 Subject: [PATCH 32/34] Slice component: - Removed unneeded README.md file --- src/slice/service/README.md | 38 ------------------------------------- 1 file changed, 38 deletions(-) delete mode 100644 src/slice/service/README.md diff --git a/src/slice/service/README.md b/src/slice/service/README.md deleted file mode 100644 index 696b4a6e0..000000000 --- a/src/slice/service/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# SLICE GROUPING details - -## 
Description -- Similar slice requests can share underlying services. -- Clustering algorithm for slice grouping. -- Consider both paths and SLA constraints. -- SLA monitored by slice group. - -## TFS Target Objective -- Objective 3.2: Provisioning of multi-tenant transport network slices. -- Improve network resource usage by 30% by adopting multi-tenancy resource allocation algorithms. -- Optimal slice grouping: trade-offs between economies of scale and limitations as to which SLAs can be grouped together need to be considered. -- Optimal grouping of slices is required to maximise KPIs, such as resource utilisation, utility of the connectivity, and energy efficiency. -- In this context, trade-offs between the resulting control plane complexity and differential treatment of SLA classes should be considered. - -## New Requirements -- User can select if slice grouping is performed per-slice request. -- Slice grouping introduces a clustering algorithm for finding service optimisation while preserving slice SLA. -- Service (re-)optimisation is provided. - -## TFS Architecture Update -- Update Slice service RPC to include Slice Grouping. -- Use novel Slice model with SLA constraints. -- Use Policy Component with action to update services to apply slice grouping. -- Describe Slice service operation modes: per-request or user-triggered. - - OSS/BSS --> Slice : Create Slice with SLA (slice) - Slice --> Slice : Slice Grouping (slice) -alt [slice can be grouped to other slice services] - // do nothing and return existing slice -else [slice needs new services] - Slice --> ... : normal logic -end alt - Slice --> OSS/BSS : slice - -slice.proto: - rpc OrderSliceWithSLA(context.Slice) returns (context.SliceId) {} // If slice with SLA already exists, returns slice. If not, it creates it. - rpc RunSliceGrouping (context.Empty) returns (context.Empty) {} // Optimizes the underlying services and re-maps them to the requested slices. 
-- GitLab From 48d3068bb6a09aea87e7a53ec17b4bb493d861f4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 14:58:17 +0000 Subject: [PATCH 33/34] Deploy script: - Removed manual changes in my_deploy.sh --- my_deploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/my_deploy.sh b/my_deploy.sh index 6a360812b..518b90f28 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -57,7 +57,7 @@ export CRDB_DATABASE="tfs" export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="YES" +export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. export CRDB_REDEPLOY="" @@ -90,7 +90,7 @@ export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" # Disable flag for dropping tables if they exist. -export QDB_DROP_TABLES_IF_EXIST="YES" +export QDB_DROP_TABLES_IF_EXIST="" # Disable flag for re-deploying QuestDB from scratch. export QDB_REDEPLOY="" -- GitLab From 1b2785f2adacd099884931ce1871aff573c43af0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 21 Feb 2023 15:56:51 +0000 Subject: [PATCH 34/34] Context component: - Added indexes recommended by Cockroach internal planner to boost performance - Added extra foreign-key indexes to enhance constraint validation --- src/context/service/database/PolicyRule.py | 30 +++++++++---------- .../database/models/ConfigRuleModel.py | 6 ++-- .../database/models/ConnectionModel.py | 6 ++-- .../database/models/ConstraintModel.py | 4 +-- .../service/database/models/EndPointModel.py | 4 +-- .../service/database/models/LinkModel.py | 2 +- .../database/models/PolicyRuleModel.py | 22 +++++++------- .../service/database/models/ServiceModel.py | 4 +-- .../service/database/models/SliceModel.py | 10 +++---- .../service/database/models/TopologyModel.py | 10 +++---- src/context/service/database/models/_Base.py | 13 ++++++++ 11 files changed, 62 insertions(+), 49 deletions(-) 
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py index a10010389..e95cec4ae 100644 --- a/src/context/service/database/PolicyRule.py +++ b/src/context/service/database/PolicyRule.py @@ -65,7 +65,7 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule policyrule_kind = PolicyRuleKindEnum._member_map_.get(policyrule_kind.upper()) # pylint: disable=no-member policyrule_state = grpc_to_enum__policyrule_state(policyrule_basic.policyRuleState.policyRuleState) - policyrule_state_message = policyrule_basic.policyRuleState.policyRuleStateMessage + policyrule_state_msg = policyrule_basic.policyRuleState.policyRuleStateMessage json_policyrule_basic = grpc_message_to_json(policyrule_basic) policyrule_eca_data = json.dumps({ @@ -77,15 +77,15 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule now = datetime.datetime.utcnow() policyrule_data = [{ - 'policyrule_uuid' : policyrule_uuid, - 'policyrule_kind' : policyrule_kind, - 'policyrule_state' : policyrule_state, - 'policyrule_state_message': policyrule_state_message, - 'policyrule_priority' : policyrule_basic.priority, - 'policyrule_eca_data' : policyrule_eca_data, - 'created_at' : now, - 'updated_at' : now, - }] + 'policyrule_uuid' : policyrule_uuid, + 'policyrule_kind' : policyrule_kind, + 'policyrule_state' : policyrule_state, + 'policyrule_state_msg': policyrule_state_msg, + 'policyrule_priority' : policyrule_basic.priority, + 'policyrule_eca_data' : policyrule_eca_data, + 'created_at' : now, + 'updated_at' : now, + }] policyrule_service_uuid = None if policyrule_kind == PolicyRuleKindEnum.SERVICE: @@ -108,11 +108,11 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule stmt = stmt.on_conflict_do_update( index_elements=[PolicyRuleModel.policyrule_uuid], set_=dict( - policyrule_state = stmt.excluded.policyrule_state, - policyrule_state_message = stmt.excluded.policyrule_state_message, 
- policyrule_priority = stmt.excluded.policyrule_priority, - policyrule_eca_data = stmt.excluded.policyrule_eca_data, - updated_at = stmt.excluded.updated_at, + policyrule_state = stmt.excluded.policyrule_state, + policyrule_state_msg = stmt.excluded.policyrule_state_msg, + policyrule_priority = stmt.excluded.policyrule_priority, + policyrule_eca_data = stmt.excluded.policyrule_eca_data, + updated_at = stmt.excluded.updated_at, ) ) stmt = stmt.returning(PolicyRuleModel.created_at, PolicyRuleModel.updated_at) diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index 363611105..d7bb97cd0 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -28,9 +28,9 @@ class ConfigRuleModel(_Base): __tablename__ = 'configrule' configrule_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE'), nullable=True) - service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True) - slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=True) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE'), nullable=True, index=True) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True) + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=True, index=True) position = Column(Integer, nullable=False) kind = Column(Enum(ConfigRuleKindEnum), nullable=False) action = Column(Enum(ORM_ConfigActionEnum), nullable=False) diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py index c2b20de20..156e33c6b 100644 --- a/src/context/service/database/models/ConnectionModel.py +++ b/src/context/service/database/models/ConnectionModel.py @@ -25,7 +25,7 @@ class 
ConnectionModel(_Base): __tablename__ = 'connection' connection_uuid = Column(UUID(as_uuid=False), primary_key=True) - service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False, index=True) settings = Column(String, nullable=False) created_at = Column(DateTime, nullable=False) updated_at = Column(DateTime, nullable=False) @@ -56,7 +56,7 @@ class ConnectionEndPointModel(_Base): __tablename__ = 'connection_endpoint' connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True) - endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True) position = Column(Integer, nullable=False) connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined') @@ -70,7 +70,7 @@ class ConnectionSubServiceModel(_Base): __tablename__ = 'connection_subservice' connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True) - subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True) + subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True, index=True) connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined') subservice = relationship('ServiceModel', lazy='joined') # back_populates='connection_subservices' diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py index e9660d502..2412080c1 100644 --- a/src/context/service/database/models/ConstraintModel.py +++ b/src/context/service/database/models/ConstraintModel.py @@ -35,8 +35,8 @@ class ConstraintModel(_Base): __tablename__ = 'constraint' 
constraint_uuid = Column(UUID(as_uuid=False), primary_key=True) - service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True) - slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=True) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True, index=True) + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=True, index=True) position = Column(Integer, nullable=False) kind = Column(Enum(ConstraintKindEnum), nullable=False) data = Column(String, nullable=False) diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py index e591bc718..12ba7e10e 100644 --- a/src/context/service/database/models/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -23,8 +23,8 @@ class EndPointModel(_Base): __tablename__ = 'endpoint' endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), nullable=False) - topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), nullable=False, index=True) + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False, index=True) name = Column(String, nullable=False) endpoint_type = Column(String, nullable=False) kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1)) diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index 49c62d376..ee591f5c8 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -46,7 +46,7 @@ class LinkEndPointModel(_Base): __tablename__ = 'link_endpoint' link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), 
primary_key=True) - endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True) link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints' diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py index 4059991e1..2f0c8a326 100644 --- a/src/context/service/database/models/PolicyRuleModel.py +++ b/src/context/service/database/models/PolicyRuleModel.py @@ -28,15 +28,15 @@ class PolicyRuleKindEnum(enum.Enum): class PolicyRuleModel(_Base): __tablename__ = 'policyrule' - policyrule_uuid = Column(UUID(as_uuid=False), primary_key=True) - policyrule_kind = Column(Enum(PolicyRuleKindEnum), nullable=False) - policyrule_state = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False) - policyrule_state_message = Column(String, nullable=False) - policyrule_priority = Column(Integer, nullable=False) - policyrule_service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True) - policyrule_eca_data = Column(String, nullable=False) - created_at = Column(DateTime, nullable=False) - updated_at = Column(DateTime, nullable=False) + policyrule_uuid = Column(UUID(as_uuid=False), primary_key=True) + policyrule_kind = Column(Enum(PolicyRuleKindEnum), nullable=False) + policyrule_state = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False) + policyrule_state_msg = Column(String, nullable=False) + policyrule_priority = Column(Integer, nullable=False) + policyrule_service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True, index=True) + policyrule_eca_data = Column(String, nullable=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) policyrule_service = 
relationship('ServiceModel') # back_populates='policyrules' policyrule_devices = relationship('PolicyRuleDeviceModel' ) # back_populates='policyrule' @@ -55,7 +55,7 @@ class PolicyRuleModel(_Base): 'policyRuleId': self.dump_id(), 'policyRuleState': { 'policyRuleState': self.policyrule_state.value, - 'policyRuleStateMessage': self.policyrule_state_message, + 'policyRuleStateMessage': self.policyrule_state_msg, }, 'priority': self.policyrule_priority, }) @@ -71,7 +71,7 @@ class PolicyRuleDeviceModel(_Base): __tablename__ = 'policyrule_device' policyrule_uuid = Column(ForeignKey('policyrule.policyrule_uuid', ondelete='RESTRICT'), primary_key=True) - device_uuid = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True, index=True) #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices' device = relationship('DeviceModel', lazy='joined') # back_populates='policyrule_devices' diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py index b581bf900..09ff381b5 100644 --- a/src/context/service/database/models/ServiceModel.py +++ b/src/context/service/database/models/ServiceModel.py @@ -25,7 +25,7 @@ class ServiceModel(_Base): __tablename__ = 'service' service_uuid = Column(UUID(as_uuid=False), primary_key=True) - context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) + context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False, index=True) service_name = Column(String, nullable=False) service_type = Column(Enum(ORM_ServiceTypeEnum), nullable=False) service_status = Column(Enum(ORM_ServiceStatusEnum), nullable=False) @@ -67,7 +67,7 @@ class ServiceEndPointModel(_Base): __tablename__ = 'service_endpoint' service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE' ), primary_key=True) - endpoint_uuid = 
Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True) service = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined') endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints' diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py index 458bc714a..2d6c88416 100644 --- a/src/context/service/database/models/SliceModel.py +++ b/src/context/service/database/models/SliceModel.py @@ -24,7 +24,7 @@ class SliceModel(_Base): __tablename__ = 'slice' slice_uuid = Column(UUID(as_uuid=False), primary_key=True) - context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) + context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False, index=True) slice_name = Column(String, nullable=True) slice_status = Column(Enum(ORM_SliceStatusEnum), nullable=False) slice_owner_uuid = Column(String, nullable=True) @@ -81,7 +81,7 @@ class SliceEndPointModel(_Base): __tablename__ = 'slice_endpoint' slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) - endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True) slice = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined') endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints' @@ -90,7 +90,7 @@ class SliceServiceModel(_Base): __tablename__ = 'slice_service' slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) - service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True) + service_uuid = Column(ForeignKey('service.service_uuid', 
ondelete='RESTRICT'), primary_key=True, index=True) slice = relationship('SliceModel', back_populates='slice_services', lazy='joined') service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services' @@ -98,8 +98,8 @@ class SliceServiceModel(_Base): class SliceSubSliceModel(_Base): __tablename__ = 'slice_subslice' - slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True) - subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True) + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True) + subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), primary_key=True, index=True) slice = relationship( 'SliceModel', foreign_keys='SliceSubSliceModel.slice_uuid', back_populates='slice_subslices', lazy='joined') diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index 92802e5b2..7dc2333f0 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -22,7 +22,7 @@ class TopologyModel(_Base): __tablename__ = 'topology' topology_uuid = Column(UUID(as_uuid=False), primary_key=True) - context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) + context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False, index=True) topology_name = Column(String, nullable=False) created_at = Column(DateTime, nullable=False) updated_at = Column(DateTime, nullable=False) @@ -56,8 +56,8 @@ class TopologyModel(_Base): class TopologyDeviceModel(_Base): __tablename__ = 'topology_device' - topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) - device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), primary_key=True) + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, 
index=True) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), primary_key=True, index=True) #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices' device = relationship('DeviceModel', lazy='joined') # back_populates='topology_devices' @@ -65,8 +65,8 @@ class TopologyDeviceModel(_Base): class TopologyLinkModel(_Base): __tablename__ = 'topology_link' - topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) - link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True) + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True, index=True) + link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True, index=True) #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links' link = relationship('LinkModel', lazy='joined') # back_populates='topology_links' diff --git a/src/context/service/database/models/_Base.py b/src/context/service/database/models/_Base.py index 973f7b7be..a10de60eb 100644 --- a/src/context/service/database/models/_Base.py +++ b/src/context/service/database/models/_Base.py @@ -39,6 +39,9 @@ def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None: index_storing('configrule_slice_uuid_rec_idx', 'configrule', ['slice_uuid'], [ 'device_uuid', 'service_uuid', 'position', 'kind', 'action', 'data', 'created_at', 'updated_at' ]), + index_storing('connection_service_uuid_rec_idx', 'connection', ['service_uuid'], [ + 'settings', 'created_at', 'updated_at' + ]), index_storing('constraint_service_uuid_rec_idx', 'constraint', ['service_uuid'], [ 'slice_uuid', 'position', 'kind', 'data', 'created_at', 'updated_at' ]), @@ -48,6 +51,16 @@ def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None: index_storing('endpoint_device_uuid_rec_idx', 'endpoint', ['device_uuid'], [ 'topology_uuid', 
'name', 'endpoint_type', 'kpi_sample_types', 'created_at', 'updated_at' ]), + index_storing('service_context_uuid_rec_idx', 'service', ['context_uuid'], [ + 'service_name', 'service_type', 'service_status', 'created_at', 'updated_at' + ]), + index_storing('slice_context_uuid_rec_idx', 'slice', ['context_uuid'], [ + 'slice_name', 'slice_status', 'slice_owner_uuid', 'slice_owner_string', 'created_at', 'updated_at' + ]), + + index_storing('topology_context_uuid_rec_idx', 'topology', ['context_uuid'], [ + 'topology_name', 'created_at', 'updated_at' + ]), ] def callback(session : Session) -> bool: for stmt in statements: session.execute(stmt) -- GitLab