diff --git a/deploy.sh b/deploy.sh index fa1dc2b3623255d2dac82cc1d982c607b9b6af5b..c62778417f7e07a119c778b58fe9c44105d5b1a5 100755 --- a/deploy.sh +++ b/deploy.sh @@ -200,8 +200,8 @@ for COMPONENT in $TFS_COMPONENTS; do DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log" kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG" COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") - kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" - kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" + #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG" echo " Collecting env-vars for '$COMPONENT' component..." @@ -234,7 +234,11 @@ done echo "Deploying extra manifests..." for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do echo "Processing manifest '$EXTRA_MANIFEST'..." - kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST + if [[ "$EXTRA_MANIFEST" == *"servicemonitor"* ]]; then + kubectl apply -f $EXTRA_MANIFEST + else + kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST + fi printf "\n" done printf "\n" diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 5c07971a328a389473899375f2d2aad9031f473e..2998640327864c1e9c2f6782a5adf252eb6673a7 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -45,6 +45,7 @@ spec: ports: - containerPort: 1010 - containerPort: 8080 + - containerPort: 9192 env: - name: DB_BACKEND value: "redis" @@ -74,6 +75,8 @@ apiVersion: v1 kind: Service metadata: name: contextservice + labels: + app: contextservice spec: type: ClusterIP selector: @@ -87,3 +90,7 @@ spec: protocol: TCP port: 8080 targetPort: 8080 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index d2595ab1915554d7ebfd786b8f39b531e40da490..83daa41f3c0cdf8e84b02dfc0ad18d8f7644e57b 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -20,6 +20,7 @@ spec: selector: matchLabels: app: deviceservice + replicas: 1 template: metadata: labels: @@ -32,6 +33,7 @@ spec: imagePullPolicy: Always ports: - containerPort: 2020 + - containerPort: 9192 env: - name: LOG_LEVEL value: "INFO" @@ -53,6 +55,8 @@ apiVersion: v1 kind: Service metadata: name: deviceservice + labels: + app: deviceservice spec: type: ClusterIP selector: @@ -62,3 +66,7 @@ spec: protocol: TCP port: 2020 targetPort: 2020 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index d2ad4f40444faa6b9de7724f8b3df077bb7910b2..0f6b5bb9df1ccfc6057c0746058da6754233376a 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -32,6 +32,7 @@ spec: imagePullPolicy: Always ports: - containerPort: 8080 + - containerPort: 9192 env: - name: LOG_LEVEL value: "INFO" @@ -82,6 +83,8 @@ apiVersion: v1 kind: Service metadata: name: dltservice + labels: + app: dltservice spec: type: ClusterIP selector: @@ -91,3 +94,7 @@ spec: protocol: TCP port: 8080 targetPort: 8080 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index 
3ef3ffba301cadf26beaa34787dcd816e87c65a0..b275035f62c68eeb8d28f1892909650ca10defee 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -32,6 +32,7 @@ spec: imagePullPolicy: Always ports: - containerPort: 10010 + - containerPort: 9192 env: - name: LOG_LEVEL value: "INFO" @@ -53,6 +54,8 @@ apiVersion: v1 kind: Service metadata: name: interdomainservice + labels: + app: interdomainservice spec: type: ClusterIP selector: @@ -62,3 +65,7 @@ spec: protocol: TCP port: 10010 targetPort: 10010 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml index 39acfd52330ae6b1c3034e61e793d68491086237..aed8d1c51e5e84abec11dcc272c786b208dd9556 100644 --- a/manifests/monitoringservice.yaml +++ b/manifests/monitoringservice.yaml @@ -72,6 +72,9 @@ spec: - name: grpc containerPort: 7070 protocol: TCP + - name: metrics + containerPort: 9192 + protocol: TCP env: - name: LOG_LEVEL value: "INFO" @@ -101,6 +104,8 @@ apiVersion: v1 kind: Service metadata: name: monitoringservice + labels: + app: monitoringservice spec: type: ClusterIP selector: @@ -122,6 +127,10 @@ spec: protocol: TCP port: 8812 targetPort: 8812 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: networking.k8s.io/v1 diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 92e24ac42b7b86be6056709abd9a2cd6fc16598b..71c927b567316bb118ff085f1727abd03e92c0d7 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -20,6 +20,7 @@ spec: selector: matchLabels: app: pathcompservice + replicas: 5 template: metadata: labels: @@ -32,6 +33,7 @@ spec: imagePullPolicy: Always ports: - containerPort: 10020 + - containerPort: 9192 env: - name: LOG_LEVEL value: "INFO" @@ -75,6 +77,8 @@ apiVersion: v1 kind: Service metadata: name: pathcompservice + labels: + app: pathcompservice spec: type: ClusterIP selector: @@ -88,3 +92,7 @@ spec: protocol: TCP port: 8081 targetPort: 8081 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad5f042baa273d67d646a9168be1e0c0b1133ec1 --- /dev/null +++ b/manifests/servicemonitors.yaml @@ -0,0 +1,231 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + namespace: monitoring # namespace where prometheus is running + name: tfs-contextservice-metric + labels: + app: contextservice + #release: prometheus + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, + # Prometheus cannot identify the metrics of the Flask app as the target.) 
+spec: + selector: + matchLabels: + # Target app service + #namespace: tfs + app: contextservice # same as above + #release: prometheus # same as above + endpoints: + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval + namespaceSelector: + any: false + matchNames: + - tfs # namespace where the app is running +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + namespace: monitoring # namespace where prometheus is running + name: tfs-deviceservice-metric + labels: + app: deviceservice + #release: prometheus + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, + # Prometheus cannot identify the metrics of the Flask app as the target.) +spec: + selector: + matchLabels: + # Target app service + #namespace: tfs + app: deviceservice # same as above + #release: prometheus # same as above + endpoints: + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval + namespaceSelector: + any: false + matchNames: + - tfs # namespace where the app is running +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + namespace: monitoring # namespace where prometheus is running + name: tfs-serviceservice-metric + labels: + app: serviceservice + #release: prometheus + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, + # Prometheus cannot identify the metrics of the Flask app as the target.) +spec: + selector: + matchLabels: + # Target app service + #namespace: tfs + app: serviceservice # same as above + #release: prometheus # same as above + endpoints: + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval + namespaceSelector: + any: false + matchNames: + - tfs # namespace where the app is running +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + namespace: monitoring # namespace where prometheus is running + name: tfs-sliceservice-metric + labels: + app: sliceservice + #release: prometheus + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, + # Prometheus cannot identify the metrics of the Flask app as the target.) +spec: + selector: + matchLabels: + # Target app service + #namespace: tfs + app: sliceservice # same as above + #release: prometheus # same as above + endpoints: + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval + namespaceSelector: + any: false + matchNames: + - tfs # namespace where the app is running +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + namespace: monitoring # namespace where prometheus is running + name: tfs-pathcompservice-metric + labels: + app: pathcompservice + #release: prometheus + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, + # Prometheus cannot identify the metrics of the Flask app as the target.) 
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: pathcompservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-monitoringservice-metric
+  labels:
+    app: monitoringservice
+    #release: prometheus
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    #   the servicemonitor of Prometheus itself: Without the correct name,
+    #   Prometheus cannot identify the metrics of the Flask app as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: monitoringservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-dltservice-metric
+  labels:
+    app: dltservice
+    #release: prometheus
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    #   the servicemonitor of Prometheus itself: Without the correct name,
+    #   Prometheus cannot identify the metrics of the Flask app as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: dltservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-interdomainservice-metric
+  labels:
+    app: interdomainservice
+    #release: prometheus
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    #   the servicemonitor of Prometheus itself: Without the correct name,
+    #   Prometheus cannot identify the metrics of the Flask app as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: interdomainservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
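Each ServiceMonitor above scrapes the named `metrics` port (9192) of the matching Service in the `tfs` namespace over plain HTTP at `/metrics`. Before wiring Prometheus in, a single target can be sanity-checked with a stdlib-only probe such as this sketch (the port-forward command and service name are illustrative assumptions):

```python
# Sketch: check that a component exposes Prometheus metrics on port 9192.
# Assumes the service port was forwarded locally first, e.g.:
#   kubectl --namespace tfs port-forward service/contextservice 9192:9192
import urllib.request

URL = 'http://127.0.0.1:9192/metrics'  # path/port scraped by the ServiceMonitors

with urllib.request.urlopen(URL, timeout=5) as response:
    body = response.read().decode('utf-8')

# the decorators in Decorator.py register tfs_<component>_<sub_module>_* series
tfs_series = [line for line in body.splitlines() if line.startswith('tfs_')]
print('\n'.join(tfs_series) if tfs_series else 'no tfs_* metrics exposed yet')
```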
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index a5568a5112eb08a02df2178ba45db57b57c19cc3..089be20f969509c9d5f12922a6cd991acd2f3bc0 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -20,6 +20,7 @@ spec:
   selector:
     matchLabels:
       app: serviceservice
+  replicas: 5
   template:
     metadata:
       labels:
@@ -32,6 +33,7 @@ spec:
         imagePullPolicy: Always
         ports:
         - containerPort: 3030
+        - containerPort: 9192
         env:
         - name: LOG_LEVEL
           value: "INFO"
@@ -53,6 +55,8 @@
 apiVersion: v1
 kind: Service
 metadata:
   name: serviceservice
+  labels:
+    app: serviceservice
 spec:
   type: ClusterIP
   selector:
@@ -62,3 +66,7 @@ spec:
     protocol: TCP
     port: 3030
     targetPort: 3030
+  - name: metrics
+    protocol: TCP
+    port: 9192
+    targetPort: 9192
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index b20669b0c03cc22857abd1534e19780025b9066a..ff4b41fe7c709acf0d58c9c73b9f6198104a89fd 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -20,6 +20,7 @@ spec:
   selector:
     matchLabels:
       app: sliceservice
+  replicas: 1
   template:
     metadata:
       labels:
@@ -32,6 +33,7 @@ spec:
         imagePullPolicy: Always
         ports:
         - containerPort: 4040
+        - containerPort: 9192
         env:
         - name: LOG_LEVEL
           value: "INFO"
@@ -53,6 +55,8 @@
 apiVersion: v1
 kind: Service
 metadata:
   name: sliceservice
+  labels:
+    app: sliceservice
 spec:
   type: ClusterIP
   selector:
@@ -62,3 +66,7 @@ spec:
     protocol: TCP
     port: 4040
     targetPort: 4040
+  - name: metrics
+    protocol: TCP
+    port: 9192
+    targetPort: 9192
diff --git a/proto/dlt_connector.proto b/proto/dlt_connector.proto
index 1038d6ccd40c8393313fc7f8dbfd48b1e0cf1739..cee0c70bdcda42f435339751c6cfabf609b14d21 100644
--- a/proto/dlt_connector.proto
+++ b/proto/dlt_connector.proto
@@ -35,20 +35,24 @@ service DltConnectorService {
 
 message DltDeviceId {
   context.TopologyId topology_id = 1;
-  context.DeviceId device_id = 2;
+  context.DeviceId device_id = 2;
+  bool delete = 3;
 }
 
 message DltLinkId {
   context.TopologyId topology_id = 1;
-  context.LinkId link_id = 2;
+  context.LinkId link_id = 2;
+  bool delete = 3;
 }
 
 message DltServiceId {
   context.TopologyId topology_id = 1;
-  context.ServiceId service_id = 2;
+  context.ServiceId service_id = 2;
+  bool delete = 3;
 }
 
 message DltSliceId {
   context.TopologyId topology_id = 1;
-  context.SliceId slice_id = 2;
+  context.SliceId slice_id = 2;
+  bool delete = 3;
 }
diff --git a/src/common/Constants.py b/src/common/Constants.py
index 964d904da704324d6def548103675e815743d818..ffdfbc4e03adaa272ce5b841ea44923409df5cbe 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -20,7 +20,7 @@ DEFAULT_LOG_LEVEL = logging.WARNING
 
 # Default gRPC server settings
 DEFAULT_GRPC_BIND_ADDRESS = '0.0.0.0'
-DEFAULT_GRPC_MAX_WORKERS = 10
+DEFAULT_GRPC_MAX_WORKERS = 200
 DEFAULT_GRPC_GRACE_PERIOD = 60
 
 # Default HTTP server settings
diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee2a919e10f25104d0fa77caaf8bafa11c2b30f
--- /dev/null
+++ b/src/common/method_wrappers/Decorator.py
@@ -0,0 +1,132 @@
+# Copyright 2021-2023 H2020 TeraFlow
(https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging, threading +from enum import Enum +from typing import Dict, Tuple +from prometheus_client import Counter, Histogram +from prometheus_client.metrics import MetricWrapperBase, INF +from common.tools.grpc.Tools import grpc_message_to_json_string +from .ServiceExceptions import ServiceException + +class MetricTypeEnum(Enum): + COUNTER_STARTED = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_started' + COUNTER_COMPLETED = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_completed' + COUNTER_FAILED = 'tfs_{component:s}_{sub_module:s}_{method:s}_counter_requests_failed' + HISTOGRAM_DURATION = 'tfs_{component:s}_{sub_module:s}_{method:s}_histogram_duration' + +METRIC_TO_CLASS_PARAMS = { + MetricTypeEnum.COUNTER_STARTED : (Counter, {}), + MetricTypeEnum.COUNTER_COMPLETED : (Counter, {}), + MetricTypeEnum.COUNTER_FAILED : (Counter, {}), + MetricTypeEnum.HISTOGRAM_DURATION: (Histogram, { + 'buckets': ( + # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF + 0.0010, 0.0025, 0.0050, 0.0075, + 0.0100, 0.0250, 0.0500, 0.0750, + 0.1000, 0.2500, 0.5000, 0.7500, + 1.0000, 2.5000, 5.0000, 7.5000, + INF) + }) +} + +class MetricsPool: + lock = threading.Lock() + metrics : Dict[str, MetricWrapperBase] = dict() + + def __init__( + self, component : str, sub_module : str, labels : Dict[str, str] = {}, + default_metric_params : Dict[MetricTypeEnum, Dict] = dict() + ) -> None: + self._component = component + self._sub_module = sub_module + self._labels = labels + self._default_metric_params = default_metric_params + + def get_or_create(self, method : str, metric_type : MetricTypeEnum, **metric_params) -> MetricWrapperBase: + metric_name = str(metric_type.value).format( + component=self._component, sub_module=self._sub_module, method=method).upper() + with MetricsPool.lock: + if metric_name not in MetricsPool.metrics: + metric_tuple : Tuple[MetricWrapperBase, Dict] = METRIC_TO_CLASS_PARAMS.get(metric_type) + metric_class, default_metric_params = metric_tuple + if len(metric_params) == 0: metric_params = self._default_metric_params.get(metric_type, {}) + if len(metric_params) == 0: metric_params = default_metric_params + labels = sorted(self._labels.keys()) + MetricsPool.metrics[metric_name] = metric_class(metric_name.lower(), '', labels, **metric_params) + return MetricsPool.metrics[metric_name] + + def get_metrics( + self, method : str + ) -> Tuple[MetricWrapperBase, MetricWrapperBase, MetricWrapperBase, MetricWrapperBase]: + histogram_duration : Histogram = self.get_or_create(method, MetricTypeEnum.HISTOGRAM_DURATION) + counter_started : Counter = self.get_or_create(method, MetricTypeEnum.COUNTER_STARTED) + counter_completed : Counter = self.get_or_create(method, MetricTypeEnum.COUNTER_COMPLETED) + counter_failed : Counter = self.get_or_create(method, MetricTypeEnum.COUNTER_FAILED) + + if len(self._labels) > 0: + histogram_duration = histogram_duration.labels(**(self._labels)) + 
counter_started = counter_started.labels(**(self._labels)) + counter_completed = counter_completed.labels(**(self._labels)) + counter_failed = counter_failed.labels(**(self._labels)) + + return histogram_duration, counter_started, counter_completed, counter_failed + +def metered_subclass_method(metrics_pool : MetricsPool): + def outer_wrapper(func): + metrics = metrics_pool.get_metrics(func.__name__) + histogram_duration, counter_started, counter_completed, counter_failed = metrics + + @histogram_duration.time() + def inner_wrapper(self, *args, **kwargs): + counter_started.inc() + try: + reply = func(self, *args, **kwargs) + counter_completed.inc() + return reply + except KeyboardInterrupt: # pylint: disable=try-except-raise + raise + except Exception: # pylint: disable=broad-except + counter_failed.inc() + + return inner_wrapper + return outer_wrapper + +def safe_and_metered_rpc_method(metrics_pool : MetricsPool, logger : logging.Logger): + def outer_wrapper(func): + method_name = func.__name__ + metrics = metrics_pool.get_metrics(method_name) + histogram_duration, counter_started, counter_completed, counter_failed = metrics + + @histogram_duration.time() + def inner_wrapper(self, request, grpc_context : grpc.ServicerContext): + counter_started.inc() + try: + logger.debug('{:s} request: {:s}'.format(method_name, grpc_message_to_json_string(request))) + reply = func(self, request, grpc_context) + logger.debug('{:s} reply: {:s}'.format(method_name, grpc_message_to_json_string(reply))) + counter_completed.inc() + return reply + except ServiceException as e: # pragma: no cover (ServiceException not thrown) + if e.code not in [grpc.StatusCode.NOT_FOUND, grpc.StatusCode.ALREADY_EXISTS]: + # Assume not found or already exists is just a condition, not an error + logger.exception('{:s} exception'.format(method_name)) + counter_failed.inc() + grpc_context.abort(e.code, e.details) + except Exception as e: # pragma: no cover, pylint: disable=broad-except + logger.exception('{:s} exception'.format(method_name)) + counter_failed.inc() + grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) + return inner_wrapper + return outer_wrapper diff --git a/src/common/rpc_method_wrapper/ServiceExceptions.py b/src/common/method_wrappers/ServiceExceptions.py similarity index 100% rename from src/common/rpc_method_wrapper/ServiceExceptions.py rename to src/common/method_wrappers/ServiceExceptions.py diff --git a/src/common/rpc_method_wrapper/__init__.py b/src/common/method_wrappers/__init__.py similarity index 100% rename from src/common/rpc_method_wrapper/__init__.py rename to src/common/method_wrappers/__init__.py diff --git a/src/common/method_wrappers/results-perf-eval/MW/dev-drv-mw.png b/src/common/method_wrappers/results-perf-eval/MW/dev-drv-mw.png new file mode 100644 index 0000000000000000000000000000000000000000..a5732f8d162182a014497f219b510baa3d5ac105 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/MW/dev-drv-mw.png differ diff --git a/src/common/method_wrappers/results-perf-eval/MW/generate.sh b/src/common/method_wrappers/results-perf-eval/MW/generate.sh new file mode 100755 index 0000000000000000000000000000000000000000..bb86e747580cc48cfd93d9091584ebe66a2586fc --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/MW/generate.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +python generate_plot.py "DEVICE_DRIVER_MW" +python generate_plot.py "SERVICE_HANDLER_MW" diff --git a/src/common/method_wrappers/results-perf-eval/MW/generate_plot.py 
b/src/common/method_wrappers/results-perf-eval/MW/generate_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..beae663f671b73b685e5ac8ec2048df35771c862 --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/MW/generate_plot.py @@ -0,0 +1,69 @@ +import enum, sys +import numpy as np +import matplotlib.pyplot as plt + +class PlotName(enum.Enum): + DEVICE_DRIVER_MW = 'dev-drv-mw' + SERVICE_HANDLER_MW = 'srv-hlr-mw' + +plot_name = PlotName.__members__.get(sys.argv[1]) +if plot_name is None: raise Exception('Unsupported plot: {:s}'.format(str(plot_name))) + +PLOTS = { + PlotName.DEVICE_DRIVER_MW: ( + #'Device Driver - MicroWave', '0.0001-100', [ + # ('GetConfig', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,10,172,0,1,0,0,0,0,0,0]), + # ('SetConfig', [89,1,0,0,0,0,0,0,0,0,0,0,0,0,0,6,34,50,1,0,0,0,0,0,0,0]), + # ('DeleteConfig', [90,1,0,0,0,0,0,0,0,0,0,0,0,0,2,3,0,4,72,12,0,0,0,0,0,0]), + #]), + 'Device Driver - MicroWave', '0.1-10', [ + ('GetConfig', [0,1,0,10,172,0,1,0]), + ('SetConfig', [0,0,6,34,50,1,0,0]), + ('DeleteConfig', [0,2,3,0,4,72,12,0]), + ]), + PlotName.SERVICE_HANDLER_MW: ( + 'Service Handler - L2NM MicroWave', '1-100', [ + ('SetEndpoint', [0,1,0,1,5,75,6,0]), + ('DeleteEndpoint', [0,0,0,0,1,77,17,0]), + ]), +} + +BINS_RANGES = { + '0.0001-100' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, + 25, 50, 75, 100, 200], + '0.1-10' : [0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10], + '0.0001-1' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1], + '0.0001-0.25' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25], + '1-100' : [1, 2.5, 5, 7.5, 10, 25, 50, 75, 100], + '0.001-100' : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, + 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, 25, 50, 75, 100, 200], + '0.001-7.5' : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, + 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10], + '0.01-5' : [0, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5], +} + +# plot the cumulative histogram +fig, ax = plt.subplots(figsize=(8, 8)) + +bins = PLOTS[plot_name][1] +if isinstance(bins, str): bins = BINS_RANGES[PLOTS[plot_name][1]] +bins = np.array(bins).astype(float) + +for label, counts in PLOTS[plot_name][2]: + counts = np.array(counts).astype(float) + assert len(bins) == len(counts) + 1 + centroids = (bins[1:] + bins[:-1]) / 2 + ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True, + histtype='step', cumulative=True, label=label) + +ax.grid(True) +ax.legend(loc='upper left') +ax.set_title(PLOTS[plot_name][0]) +ax.set_xlabel('seconds') +ax.set_ylabel('Likelihood of occurrence') +plt.xscale('log') +plt.savefig('{:s}.png'.format(plot_name.value), dpi = (600)) +plt.show() diff --git a/src/common/method_wrappers/results-perf-eval/MW/srv-hlr-mw.png b/src/common/method_wrappers/results-perf-eval/MW/srv-hlr-mw.png new file mode 100644 index 0000000000000000000000000000000000000000..70368ade484fd07310b521b6ea8182b223604922 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/MW/srv-hlr-mw.png differ diff --git a/src/common/method_wrappers/results-perf-eval/OpenConfig/dev-drv-openconfig.png b/src/common/method_wrappers/results-perf-eval/OpenConfig/dev-drv-openconfig.png new file mode 100644 index 0000000000000000000000000000000000000000..7130d5cc721a5b3dd419f0eba1217664aab064d4 Binary 
files /dev/null and b/src/common/method_wrappers/results-perf-eval/OpenConfig/dev-drv-openconfig.png differ diff --git a/src/common/method_wrappers/results-perf-eval/OpenConfig/generate.sh b/src/common/method_wrappers/results-perf-eval/OpenConfig/generate.sh new file mode 100755 index 0000000000000000000000000000000000000000..0ecd000fd52dc8b5d40b0b115f09b4e957582240 --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/OpenConfig/generate.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +python generate_plot.py "DEVICE_DRIVER_OPENCONFIG" +python generate_plot.py "SERVICE_HANDLER_OPENCONFIG_L2NM" +python generate_plot.py "SERVICE_HANDLER_OPENCONFIG_L3NM" diff --git a/src/common/method_wrappers/results-perf-eval/OpenConfig/generate_plot.py b/src/common/method_wrappers/results-perf-eval/OpenConfig/generate_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..c6fda331d7782acefd47759d28d7e1d82508efef --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/OpenConfig/generate_plot.py @@ -0,0 +1,74 @@ +import enum, sys +import numpy as np +import matplotlib.pyplot as plt + +class PlotName(enum.Enum): + DEVICE_DRIVER_OPENCONFIG = 'dev-drv-openconfig' + SERVICE_HANDLER_OPENCONFIG_L2NM = 'srv-hlr-openconfig-l2nm' + SERVICE_HANDLER_OPENCONFIG_L3NM = 'srv-hlr-openconfig-l3nm' + +plot_name = PlotName.__members__.get(sys.argv[1]) +if plot_name is None: raise Exception('Unsupported plot: {:s}'.format(str(plot_name))) + +PLOTS = { + PlotName.DEVICE_DRIVER_OPENCONFIG: ( + 'Device Driver - OpenConfig', '0.0001-100', [ + #('GetConfig', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,121,0,0,0,0]), + #('SetConfig', [127,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,43,19,0,0,0,0,0,0]), + #('DeleteConfig', [92,1,0,0,0,0,0,0,0,0,0,0,0,0,5,2,8,71,14,0,0,0,0,0,0,0]), + + ('GetConfig', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,121,0,0,0,0]), + ('SetConfig', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,43,19,0,0,0,0,0,0]), + ('DeleteConfig', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,2,8,71,14,0,0,0,0,0,0,0]), + ]), + PlotName.SERVICE_HANDLER_OPENCONFIG_L2NM: ( + 'Service Handler - L2NM OpenConfig', '0.001-100', [ + ('SetEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,10]), + ('DeleteEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,14]), + ]), + PlotName.SERVICE_HANDLER_OPENCONFIG_L3NM: ( + 'Service Handler - L3NM OpenConfig', '0.001-100', [ + ('SetEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,17]), + ('DeleteEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,20]), + ]), +} + +BINS_RANGES = { + '0.0001-100' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, + 25, 50, 75, 100, 200], + '0.1-10' : [0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10], + '0.0001-1' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1], + '0.0001-0.25' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25], + '1-100' : [1, 2.5, 5, 7.5, 10, 25, 50, 75, 100], + '0.001-100' : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, + 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, 25, 50, 75, 100, 200], + '0.001-7.5' : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, + 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10], + '0.01-5' : [0, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5], +} + +# plot the cumulative histogram +fig, ax = plt.subplots(figsize=(8, 8)) + +bins = PLOTS[plot_name][1] +if isinstance(bins, str): bins = 
BINS_RANGES[PLOTS[plot_name][1]] +bins = np.array(bins).astype(float) + +for label, counts in PLOTS[plot_name][2]: + counts = np.array(counts).astype(float) + assert len(bins) == len(counts) + 1 + centroids = (bins[1:] + bins[:-1]) / 2 + ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True, + histtype='step', cumulative=True, label=label) + +ax.grid(True) +ax.legend(loc='upper left') +ax.set_title(PLOTS[plot_name][0]) +ax.set_xlabel('seconds') +ax.set_ylabel('Likelihood of occurrence') +plt.xscale('log') +plt.savefig('{:s}.png'.format(plot_name.value), dpi = (600)) +plt.show() diff --git a/src/common/method_wrappers/results-perf-eval/OpenConfig/srv-hlr-openconfig-l2nm.png b/src/common/method_wrappers/results-perf-eval/OpenConfig/srv-hlr-openconfig-l2nm.png new file mode 100644 index 0000000000000000000000000000000000000000..9b4393a0ee713b0232b2e4eb5bba75f30159f6e9 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/OpenConfig/srv-hlr-openconfig-l2nm.png differ diff --git a/src/common/method_wrappers/results-perf-eval/OpenConfig/srv-hlr-openconfig-l3nm.png b/src/common/method_wrappers/results-perf-eval/OpenConfig/srv-hlr-openconfig-l3nm.png new file mode 100644 index 0000000000000000000000000000000000000000..e4b2d83685dc983cbd2738eecf7064eba74c8e13 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/OpenConfig/srv-hlr-openconfig-l3nm.png differ diff --git a/src/common/method_wrappers/results-perf-eval/TE/te-cdf.py b/src/common/method_wrappers/results-perf-eval/TE/te-cdf.py new file mode 100644 index 0000000000000000000000000000000000000000..60919ba8bb2a1e5976d14bafd727e176cf86b0cd --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/TE/te-cdf.py @@ -0,0 +1,30 @@ +import matplotlib.pyplot as plt + +flow_creation_us = [ + 3.007065,3.007783,3.010780,3.007374,3.006519,3.006668,3.006303,3.006463,3.006758,3.007992,3.012198,3.001413, + 3.007289,3.006241,3.007523,3.007569,3.006643,3.006255,3.007058,3.006111,3.006918,3.007972,3.006829,3.007378, + 3.007666,3.003071,3.006774,3.006060,3.006731,3.005812 +] + +flow_update_us = [ + 3.005123,3.004228,3.003897,3.006692,3.003767,3.003749,3.004626,3.004333,3.004449,3.003895,3.004092,3.003979, + 3.005099,3.213206,3.004625,3.004707,3.004187,3.004609,3.003885,3.004064,3.004308,3.004280,3.004423,3.211980, + 3.004138,3.004394,3.004018,3.004747,3.005719,3.003656 +] + +n_bins = 10 +fig, ax = plt.subplots(figsize=(8, 8)) + +# plot the cumulative histograms +n, bins, _ = ax.hist(flow_creation_us, n_bins, density=True, histtype='step', cumulative=True, label='FlowCreate') +print(n, bins) +n, bins, _ = ax.hist(flow_update_us, n_bins, density=True, histtype='step', cumulative=True, label='FlowUpdate') +print(n, bins) + +ax.grid(True) +ax.legend(loc='lower center') +ax.set_title('TE Flow Management Delay') +ax.set_xlabel('seconds') +ax.set_ylabel('Likelihood of occurrence') +plt.savefig('te-perf-eval.png', dpi = (600)) +plt.show() diff --git a/src/common/method_wrappers/results-perf-eval/TE/te-perf-eval.png b/src/common/method_wrappers/results-perf-eval/TE/te-perf-eval.png new file mode 100644 index 0000000000000000000000000000000000000000..5b2fd8160bddb4322d8fbb22dac950598bf2115b Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/TE/te-perf-eval.png differ diff --git a/src/common/method_wrappers/results-perf-eval/XR/dev-drv-xr-with-outliers.png b/src/common/method_wrappers/results-perf-eval/XR/dev-drv-xr-with-outliers.png new file mode 100644 index 
0000000000000000000000000000000000000000..9607ce0436ee962b7fa5b7ed333ca219ef097884 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/XR/dev-drv-xr-with-outliers.png differ diff --git a/src/common/method_wrappers/results-perf-eval/XR/dev-drv-xr.png b/src/common/method_wrappers/results-perf-eval/XR/dev-drv-xr.png new file mode 100644 index 0000000000000000000000000000000000000000..61f7d3e394676a05f240d032ce62124a70d8b8bb Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/XR/dev-drv-xr.png differ diff --git a/src/common/method_wrappers/results-perf-eval/XR/generate.sh b/src/common/method_wrappers/results-perf-eval/XR/generate.sh new file mode 100755 index 0000000000000000000000000000000000000000..eae3eb6d6e615f3be97b2df9f44ce544523ddb72 --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/XR/generate.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +python generate_plot.py "DEVICE_DRIVER_XR" diff --git a/src/common/method_wrappers/results-perf-eval/XR/generate_plot.py b/src/common/method_wrappers/results-perf-eval/XR/generate_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..66110b44400719a796bc86bf6ea1b0c7a1d44224 --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/XR/generate_plot.py @@ -0,0 +1,61 @@ +import enum, sys +import numpy as np +import matplotlib.pyplot as plt + +class PlotName(enum.Enum): + DEVICE_DRIVER_XR = 'dev-drv-xr' + +plot_name = PlotName.__members__.get(sys.argv[1]) +if plot_name is None: raise Exception('Unsupported plot: {:s}'.format(str(plot_name))) + +PLOTS = { + PlotName.DEVICE_DRIVER_XR: ( + #'Device Driver - XR', '0.0001-0.25', [ + # ('GetConfig', [0,0,0,0,0,0,0,0,0,77,1,1,0,0]), + # ('SetConfig', [0,15,17,7,0,0,0,0,0,0,34,3,2,0]), + # ('DeleteConfig', [23,16,0,0,0,0,0,0,0,1,32,5,1,0]), + #]), + 'Device Driver - XR', '0.0001-0.25', [ + ('GetConfig', [0,0,0,0,0,0,0,0,0,77,1,1,0,0]), + ('SetConfig', [0,0,0,0,0,0,0,0,0,0,34,3,2,0]), + ('DeleteConfig', [0,0,0,0,0,0,0,0,0,1,32,5,1,0]), + ]), +} + +BINS_RANGES = { + '0.0001-100' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, + 25, 50, 75, 100, 200], + '0.0001-1' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1], + '0.0001-0.25' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075, + 0.01, 0.025, 0.05, 0.075, 0.1, 0.25], + '0.001-100' : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, + 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, 25, 50, 75, 100, 200], + '0.001-7.5' : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, + 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10], + '0.01-5' : [0, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5], +} + +# plot the cumulative histogram +fig, ax = plt.subplots(figsize=(8, 8)) + +bins = PLOTS[plot_name][1] +if isinstance(bins, str): bins = BINS_RANGES[PLOTS[plot_name][1]] +bins = np.array(bins).astype(float) + +for label, counts in PLOTS[plot_name][2]: + counts = np.array(counts).astype(float) + assert len(bins) == len(counts) + 1 + centroids = (bins[1:] + bins[:-1]) / 2 + ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True, + histtype='step', cumulative=True, label=label) + +ax.grid(True) +ax.legend(loc='upper left') +ax.set_title(PLOTS[plot_name][0]) +ax.set_xlabel('seconds') +ax.set_ylabel('Likelihood of occurrence') +plt.xscale('log') +plt.savefig('{:s}.png'.format(plot_name.value), dpi = 
(600)) +plt.show() diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-emu-l2nm.png b/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-emu-l2nm.png new file mode 100644 index 0000000000000000000000000000000000000000..84ab8747e2a6ca5153e4b6e02483030ab891a421 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-emu-l2nm.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-emu-l3nm.png b/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-emu-l3nm.png new file mode 100644 index 0000000000000000000000000000000000000000..d37827794618024ddaa553de8d2134b455aa31a0 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-emu-l3nm.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-tapi.png b/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-tapi.png new file mode 100644 index 0000000000000000000000000000000000000000..70b51e144e259191d9d174abfebd5a6ff3a52e57 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp1-dev-drv-tapi.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp1-pathcomp-rpc-compute.png b/src/common/method_wrappers/results-perf-eval/emulated/exp1-pathcomp-rpc-compute.png new file mode 100644 index 0000000000000000000000000000000000000000..af84e7e9d5866671e9441c1a93b9d2a66a9218c9 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp1-pathcomp-rpc-compute.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-l2nm-emu.png b/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-l2nm-emu.png new file mode 100644 index 0000000000000000000000000000000000000000..3e58fcbb8d8a25c7bbbd2094a28dad419afbeb02 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-l2nm-emu.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-l3nm-emu.png b/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-l3nm-emu.png new file mode 100644 index 0000000000000000000000000000000000000000..317af9ca13267f313cab9e6d67a3ba068a3993a4 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-l3nm-emu.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-tapi.png b/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-tapi.png new file mode 100644 index 0000000000000000000000000000000000000000..764b4f6cc64db88512b449e91b2ff31d6b260f91 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp1-svc-hdl-tapi.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-connection-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-connection-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..4caa6681993a1850c9e561fed300a16a0d333a87 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-connection-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-device-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-device-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..188d784b68cbc1abb2ea90eaa305863b36cb838a Binary files /dev/null and 
b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-device-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-link-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-link-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..bc3b5ea36e7a278b670699baf7fddf12447510dc Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-link-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-service-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-service-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..74e639b85b70d35c2f0830aa837497646d6b3666 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-service-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-slice-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-slice-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..4060b8ae9b9d2a901b8d0cc2b009ce888f34fab9 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-slice-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-topology-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-topology-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..aae9d07ef4420e53ee4ba3d715f5bf37eae924dd Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-context-topology-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-device-driver-emu.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-device-driver-emu.png new file mode 100644 index 0000000000000000000000000000000000000000..9b6e4d2a47d09f884a1c6e95e5ae27c4df56d55a Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-device-driver-emu.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-device-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-device-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..70182fad226d4a387a13991cfba80a6cbd7d57fe Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-device-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-dlt-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-dlt-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..dae5e567c7395d5e6972a55b5e7053a551da624f Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-dlt-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-pathcomp-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-pathcomp-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..1c48c875f19b3a49238ee473bc265b9358cfbf6a Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-pathcomp-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-service-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-service-rpcs.png new file mode 100644 index 
0000000000000000000000000000000000000000..2c50e199195dec26ac29abf6901103d66d3bca33 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-service-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-slice-rpcs.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-slice-rpcs.png new file mode 100644 index 0000000000000000000000000000000000000000..1292bc5e717634807ce835d481bd08424ef19f2b Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-slice-rpcs.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-svc-hdl-l2nm-emu.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-svc-hdl-l2nm-emu.png new file mode 100644 index 0000000000000000000000000000000000000000..403baefa0ccd07a70c7fd08db964ae30c3a56301 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-svc-hdl-l2nm-emu.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/exp2-svc-hdl-l3nm-emu.png b/src/common/method_wrappers/results-perf-eval/emulated/exp2-svc-hdl-l3nm-emu.png new file mode 100644 index 0000000000000000000000000000000000000000..9c45a0ddf886369fc50a828ded57c26ac6691634 Binary files /dev/null and b/src/common/method_wrappers/results-perf-eval/emulated/exp2-svc-hdl-l3nm-emu.png differ diff --git a/src/common/method_wrappers/results-perf-eval/emulated/generate.sh b/src/common/method_wrappers/results-perf-eval/emulated/generate.sh new file mode 100755 index 0000000000000000000000000000000000000000..f7ed1e31f0382ce1f3b1c89e1ab7de465692614c --- /dev/null +++ b/src/common/method_wrappers/results-perf-eval/emulated/generate.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +#python generate_plot.py "EXP1_DEVICE_DRIVER_EMU_L2NM" +#python generate_plot.py "EXP1_DEVICE_DRIVER_EMU_L3NM" +python generate_plot.py "EXP1_DEVICE_DRIVER_TAPI" +#python generate_plot.py "EXP2_DEVICE_DRIVER_EMU" + +# ----- All ------------------------------------------------------------ +#python generate_plot.py "EXP1_DEVICE_DRIVER_EMU_L2NM" +#python generate_plot.py "EXP1_DEVICE_DRIVER_EMU_L3NM" +#python generate_plot.py "EXP1_DEVICE_DRIVER_TAPI" +#python generate_plot.py "EXP1_SERVICE_HANDLER_EMU_L2NM" +#python generate_plot.py "EXP1_SERVICE_HANDLER_EMU_L3NM" +#python generate_plot.py "EXP1_SERVICE_HANDLER_TAPI" +#python generate_plot.py "EXP1_COMP_PATHCOMP_RPC_COMPUTE" +#python generate_plot.py "EXP2_DEVICE_DRIVER_EMU" +#python generate_plot.py "EXP2_SERVICE_HANDLER_EMU_L2NM" +#python generate_plot.py "EXP2_SERVICE_HANDLER_EMU_L3NM" +#python generate_plot.py "EXP2_COMP_CONTEXT_DEVICE_RPCS" +#python generate_plot.py "EXP2_COMP_CONTEXT_LINK_RPCS" +#python generate_plot.py "EXP2_COMP_CONTEXT_SERVICE_RPCS" +#python generate_plot.py "EXP2_COMP_CONTEXT_SLICE_RPCS" +#python generate_plot.py "EXP2_COMP_CONTEXT_TOPOLOGY_RPCS" +#python generate_plot.py "EXP2_COMP_CONTEXT_CONNECTION_RPCS" +#python generate_plot.py "EXP2_COMP_DEVICE_RPCS" +#python generate_plot.py "EXP2_COMP_SERVICE_RPCS" +#python generate_plot.py "EXP2_COMP_SLICE_RPCS" +#python generate_plot.py "EXP2_COMP_PATHCOMP_RPCS" +#python generate_plot.py "EXP2_COMP_DLT_RPCS" diff --git a/src/common/method_wrappers/results-perf-eval/emulated/generate_plot.py b/src/common/method_wrappers/results-perf-eval/emulated/generate_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..74ae49c26b56cae22d812496633a96ea1b595405 --- /dev/null +++ 
b/src/common/method_wrappers/results-perf-eval/emulated/generate_plot.py @@ -0,0 +1,219 @@ +import enum, sys +import numpy as np +import matplotlib.pyplot as plt + +class PlotName(enum.Enum): + EXP1_DEVICE_DRIVER_EMU_L2NM = 'exp1-dev-drv-emu-l2nm' + EXP1_DEVICE_DRIVER_EMU_L3NM = 'exp1-dev-drv-emu-l3nm' + EXP1_DEVICE_DRIVER_TAPI = 'exp1-dev-drv-tapi' + EXP1_SERVICE_HANDLER_EMU_L2NM = 'exp1-svc-hdl-l2nm-emu' + EXP1_SERVICE_HANDLER_EMU_L3NM = 'exp1-svc-hdl-l3nm-emu' + EXP1_SERVICE_HANDLER_TAPI = 'exp1-svc-hdl-tapi' + EXP1_COMP_PATHCOMP_RPC_COMPUTE = 'exp1-pathcomp-rpc-compute' + + EXP2_DEVICE_DRIVER_EMU = 'exp2-device-driver-emu' + EXP2_SERVICE_HANDLER_EMU_L2NM = 'exp2-svc-hdl-l2nm-emu' + EXP2_SERVICE_HANDLER_EMU_L3NM = 'exp2-svc-hdl-l3nm-emu' + + EXP2_COMP_CONTEXT_DEVICE_RPCS = 'exp2-context-device-rpcs' + EXP2_COMP_CONTEXT_LINK_RPCS = 'exp2-context-link-rpcs' + EXP2_COMP_CONTEXT_SERVICE_RPCS = 'exp2-context-service-rpcs' + EXP2_COMP_CONTEXT_SLICE_RPCS = 'exp2-context-slice-rpcs' + EXP2_COMP_CONTEXT_TOPOLOGY_RPCS = 'exp2-context-topology-rpcs' + EXP2_COMP_CONTEXT_CONNECTION_RPCS = 'exp2-context-connection-rpcs' + EXP2_COMP_DEVICE_RPCS = 'exp2-device-rpcs' + EXP2_COMP_SERVICE_RPCS = 'exp2-service-rpcs' + EXP2_COMP_SLICE_RPCS = 'exp2-slice-rpcs' + EXP2_COMP_PATHCOMP_RPCS = 'exp2-pathcomp-rpcs' + EXP2_COMP_DLT_RPCS = 'exp2-dlt-rpcs' + +plot_name = PlotName.__members__.get(sys.argv[1]) +if plot_name is None: raise Exception('Unsupported plot: {:s}'.format(str(plot_name))) + +PLOTS = { + PlotName.EXP1_DEVICE_DRIVER_EMU_L2NM: ( + 'Device Driver - Emulated (using L2NM services)', '0.0001-1', [ + ('GetConfig', [0,27,252,212,160,261,26,2,3,9,19,11,2,1,0,0,0]), + ('SetConfig', [575,56,112,78,61,82,8,0,2,5,5,0,1, 0,0,0,0]), + ('DeleteConfig', [606,96,150,66,29,31,5,0,0,1,1,0, 0,0,0,0,0]), + ]), + + PlotName.EXP1_DEVICE_DRIVER_EMU_L3NM: ( + 'Device Driver - Emulated (using L3NM services)', '0.0001-1', [ + ('GetConfig', [0,1,40,83,127,460,132,24,13,39,36,31,9,5,1,0,0]), + ('SetConfig', [487,29,110,52,55,171,48,6,6,15,12,8,0,1, 0,0,0]), + ('DeleteConfig', [510,86,79,43,26,120,70,20,6,9,15,8,5,3, 0,0,0]), + ]), + + PlotName.EXP1_DEVICE_DRIVER_TAPI: ( + #'Device Driver - TAPI', '0.0001-1', [ + # ('GetConfig', [0,0,0,0,0,0,0,0,0,1,1,3,10,159,14, 0,0]), + # ('SetConfig', [92,3,1,0,0,0,0,6,11,47,13,13,0,0,0,0,0]), + # ('DeleteConfig', [90,0,0,0,0,0,3,14,25,35,6,11,2,0,0,0,0]), + #]), + 'Device Driver - TAPI', '0.0001-1', [ + ('GetConfig', [0,0,0,0,0,0,0,0,0,1,1,3,10,159,14, 0,0]), + ('SetConfig', [0,0,0,0,0,0,0,6,11,47,13,13,0,0,0,0,0]), + ('DeleteConfig', [0,0,0,0,0,0,3,14,25,35,6,11,2,0,0,0,0]), + ]), + + PlotName.EXP1_SERVICE_HANDLER_EMU_L2NM: ( + 'Service Handler - L2NM Emulated', '0.001-100', [ + ('SetEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,41,35,12,1,0]), + ('DeleteEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,3,29,45,7,0,0]), + ]), + + PlotName.EXP1_SERVICE_HANDLER_EMU_L3NM: ( + 'Service Handler - L3NM Emulated', '0.001-100', [ + ('SetEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,8,27,29,15,11]), + ('DeleteEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,6,22,29,18,11]), + ]), + + PlotName.EXP1_SERVICE_HANDLER_TAPI: ( + 'Service Handler - TAPI', '0.001-100', [ + ('SetEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,4,79,2,0,0,0,0]), + ('DeleteEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,5,0,3,1,73,12,0,0,0,0]), + ]), + + PlotName.EXP1_COMP_PATHCOMP_RPC_COMPUTE: ( + 'PathComp - Compute RPC', '0.01-5', [ + ('Compute (using L2NM services)', [0,0,20,32,14,22,0]), + ('Compute (using L3NM services)', [0,1,1,10,17,59,2]), + 
('Compute (using TAPI services)', [3,70,10,3,2,6,0]),
+    ]),
+
+    PlotName.EXP2_DEVICE_DRIVER_EMU: (
+        'Device Driver - Emulated', '0.0001-0.25', [
+            ('GetConfig',    [0,21,198,247,190,332,28,5,3,7,14,8,0,0]),
+            ('SetConfig',    [558,61,139,85,57,117,22,1,2,4,1,5,1,0]),
+            ('DeleteConfig', [573,123,142,63,30,78,24,2,2,8,5,2,0,0]),
+    ]),
+
+    PlotName.EXP2_SERVICE_HANDLER_EMU_L2NM: (
+        'Service Handler - L2NM Emulated', '0.001-100', [
+            ('SetEndpoint',    [0,0,0,0,0,0,0,0,0,0,0,0,2,18,15,4,1,2,2,0,0,0]),
+            ('DeleteEndpoint', [0,0,0,0,0,0,0,0,0,0,0,1,0,20,20,5,1,0,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_SERVICE_HANDLER_EMU_L3NM: (
+        'Service Handler - L3NM Emulated', '0.001-100', [
+            ('SetEndpoint',    [0,0,0,0,0,0,0,0,0,0,0,0,0,13,24,2,5,4,1,0,0,0]),
+            ('DeleteEndpoint', [0,0,0,0,0,0,0,0,0,0,0,0,0,11,27,7,3,1,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_CONTEXT_DEVICE_RPCS: (
+        'Context RPCs', '0.001-7.5', [
+            ('GetDevice',   [0,0,0,0,6,130,348,305,382,578,76,7,6,0,0,0,0]),
+            ('ListDevices', [0,0,0,0,0,0,0,0,0,4,37,43,8,2,0,0,0]),
+            ('SetDevice',   [0,0,0,0,0,42,236,158,179,380,46,9,0,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_CONTEXT_LINK_RPCS: (
+        'Context RPCs', '0.001-7.5', [
+            ('GetLink',   [0,1,9,5,1,1,0,0,0,0,0,0,0,0,0,0,0]),
+            ('ListLinks', [0,0,0,0,0,5,20,23,27,17,2,0,0,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_CONTEXT_SERVICE_RPCS: (
+        'Context RPCs', '0.001-7.5', [
+            ('GetService',    [124,120,42,55,80,167,62,34,14,33,9,2,1,0,0,0,0]),
+            ('ListServices',  [0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0]),
+            ('RemoveService', [0,0,3,18,15,29,13,5,1,4,1,0,0,0,0,0,0]),
+            ('SetService',    [6,90,59,51,63,165,70,32,5,12,8,2,0,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_CONTEXT_SLICE_RPCS: (
+        'Context RPCs', '0.001-7.5', [
+            ('GetSlice',    [30,75,48,24,32,118,56,34,12,19,8,2,0,0,0,0,0]),
+            ('ListSlices',  [0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0]),
+            ('RemoveSlice', [0,0,2,10,8,14,8,2,1,2,0,0,0,0,0,0,0]),
+            ('SetSlice',    [6,29,22,18,21,70,25,12,11,13,1,0,0,0,0,0,0]),
+            ('UnsetSlice',  [0,12,12,8,1,3,6,3,1,2,0,0,0,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_CONTEXT_TOPOLOGY_RPCS: (
+        'Context RPCs', '0.001-7.5', [
+            ('GetTopology',     [72,11,0,0,0,2,6,1,0,1,1,0,0,0,0,0,0]),
+            ('ListTopologies',  [0,0,0,0,5,38,25,10,6,10,0,0,0,0,0,0,0]),
+            ('ListTopologyIds', [1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_CONTEXT_CONNECTION_RPCS: (
+        'Context RPCs', '0.001-7.5', [
+            ('ListConnections',  [13,21,5,19,23,145,46,27,10,15,4,2,0,1,0,0,0]),
+            ('ListContextIds',   [1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]),
+            ('RemoveConnection', [0,0,17,17,12,23,11,2,2,6,0,0,0,0,0,0,0]),
+            ('SetConnection',    [0,0,17,26,9,18,4,7,2,5,4,2,0,0,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_DEVICE_RPCS: (
+        'Device RPCs', '0.001-7.5', [
+            ('AddDevice',       [0,0,0,0,0,0,0,1,2,4,0,0,0,0,0,0,0]),
+            ('ConfigureDevice', [0,0,0,0,0,0,0,0,2,140,367,243,127,143,19,9,5]),
+    ]),
+
+    PlotName.EXP2_COMP_SERVICE_RPCS: (
+        'Service RPCs', '0.001-7.5', [
+            ('CreateService', [0,0,0,9,11,32,13,10,4,7,3,2,1,0,0,0,0]),
+            ('UpdateService', [0,0,0,0,0,0,0,0,0,0,0,0,0,19,41,15,18]),
+            ('DeleteService', [0,0,0,0,0,0,0,0,0,0,0,0,1,23,45,21,6]),
+    ]),
+
+    PlotName.EXP2_COMP_SLICE_RPCS: (
+        'Slice RPCs', '0.001-7.5', [
+            ('CreateSlice', [0,0,0,0,0,4,5,4,10,11,6,2,1,0,0,0,0]),
+            ('UpdateSlice', [0,0,0,0,0,0,0,0,0,0,0,0,0,6,20,10,10]),
+            ('DeleteSlice', [0,0,0,0,0,0,0,0,0,0,0,0,0,9,21,15,2]),
+    ]),
+
+    PlotName.EXP2_COMP_PATHCOMP_RPCS: (
+        'PathComp RPCs', '0.001-7.5', [
+            ('Compute', [0,0,0,0,0,0,0,0,0,0,13,43,22,14,0,0,0]),
+    ]),
+
+    PlotName.EXP2_COMP_DLT_RPCS: (
+        'DLT RPCs', '0.001-7.5', [
+            ('RecordDevice',  [0,0,0,0,0,0,0,1,4,5,6,6,0,26,71,94,306]),
+            ('RecordLink',    [0,0,0,0,0,0,0,0,0,0,0,0,0,16,0,0,0]),
+            ('RecordService', [0,0,0,0,0,0,0,0,0,0,0,0,0,5,15,30,184]),
+            ('RecordSlice',   [0,0,0,0,0,0,0,0,2,3,1,2,1,6,19,23,82]),
+    ]),
+}
+
+BINS_RANGES = {
+    '0.0001-100'  : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075,
+                     0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10,
+                     25, 50, 75, 100, 200],
+    '0.0001-1'    : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075,
+                     0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1],
+    '0.0001-0.25' : [0, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.0025, 0.005, 0.0075,
+                     0.01, 0.025, 0.05, 0.075, 0.1, 0.25],
+    '0.001-100'   : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075,
+                     0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, 25, 50, 75, 100, 200],
+    '0.001-7.5'   : [0, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075,
+                     0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10],
+    '0.01-5'      : [0, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5],
+}
+
+# plot the cumulative histogram
+fig, ax = plt.subplots(figsize=(8, 8))
+
+bins = PLOTS[plot_name][1]
+if isinstance(bins, str): bins = BINS_RANGES[PLOTS[plot_name][1]]
+bins = np.array(bins).astype(float)
+
+for label, counts in PLOTS[plot_name][2]:
+    counts = np.array(counts).astype(float)
+    assert len(bins) == len(counts) + 1
+    centroids = (bins[1:] + bins[:-1]) / 2
+    ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True,
+            histtype='step', cumulative=True, label=label)
+
+ax.grid(True)
+ax.legend(loc='best')
+ax.set_title(PLOTS[plot_name][0])
+ax.set_xlabel('seconds')
+ax.set_ylabel('Likelihood of occurrence')
+plt.xscale('log')
+plt.savefig('{:s}.png'.format(plot_name.value), dpi = (600))
+plt.show()
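The per-bin counts hardcoded in the `PLOTS` tables of these `generate_plot.py` scripts originate from Prometheus histograms, which export cumulative `_bucket` counters (one sample per `le` bound). A small helper along these lines converts cumulative bucket values into the per-bin format the scripts consume; the example values are illustrative, and fetching the raw buckets from Prometheus is left out:

```python
from typing import Dict, List

def buckets_to_bin_counts(cumulative_buckets : Dict[float, float]) -> List[float]:
    # Prometheus histogram buckets are cumulative (le -> count observed so far);
    # differencing consecutive bounds yields the per-bin counts used in PLOTS.
    counts, previous = [], 0.0
    for le in sorted(cumulative_buckets.keys()):
        counts.append(cumulative_buckets[le] - previous)
        previous = cumulative_buckets[le]
    return counts

# illustrative values, not real measurements
example = {0.001: 0.0, 0.0025: 2.0, 0.005: 5.0, 0.0075: 5.0, 0.01: 9.0}
print(buckets_to_bin_counts(example))  # [0.0, 2.0, 3.0, 0.0, 4.0]
```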
diff --git a/src/common/method_wrappers/tests/DummyDeviceDriver.py b/src/common/method_wrappers/tests/DummyDeviceDriver.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4fe1169e8b59f1fd11ee3d7dd1fa85198c43374
--- /dev/null
+++ b/src/common/method_wrappers/tests/DummyDeviceDriver.py
@@ -0,0 +1,39 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import random, time +from common.method_wrappers.Decorator import MetricsPool, meter_method + +EXCEPTION_RATIO = 0.05 + +METRICS_POOL = MetricsPool(labels={'driver': 'dummy'}) + +class DummyDeviceDriver: + def __init__(self) -> None: + pass + + @meter_method(METRICS_POOL) + def get_config(self): + if random.random() < EXCEPTION_RATIO: raise Exception() + time.sleep(random.random()) + + @meter_method(METRICS_POOL) + def set_config(self): + if random.random() < EXCEPTION_RATIO: raise Exception() + time.sleep(random.random()) + + @meter_method(METRICS_POOL) + def del_config(self): + if random.random() < EXCEPTION_RATIO: raise Exception() + time.sleep(random.random()) diff --git a/src/common/method_wrappers/tests/README.md b/src/common/method_wrappers/tests/README.md new file mode 100644 index 0000000000000000000000000000000000000000..db9c0687098981d5410326c1330294931f496e3c --- /dev/null +++ b/src/common/method_wrappers/tests/README.md @@ -0,0 +1,63 @@ +# Performance Evaluation Method Wrapper + +## Description: + +- enable the prometheus addon: +``` +tfs@tfs-vm:~/tfs-ctrl$ microk8s.enable prometheus +``` + +- wait until the prometheus addon becomes enabled (when enabled, press Ctrl+C): +``` +tfs@tfs-vm:~/tfs-ctrl$ watch -n 1 microk8s.status --wait-ready +``` + +- wait until all pods in the monitoring namespace have STATUS=Running and READY=X/X (when done, press Ctrl+C): +``` +tfs@tfs-vm:~/tfs-ctrl$ watch -n 1 kubectl get pods --all-namespaces +``` + +- deploy as: +``` +tfs@tfs-vm:~/tfs-ctrl$ source src/common/method_wrappers/tests/deploy_specs.sh +tfs@tfs-vm:~/tfs-ctrl$ ./deploy.sh +``` + +- expose prometheus and grafana + - (required) terminal 1 (grafana UI): `kubectl port-forward -n monitoring service/grafana --address 0.0.0.0 3001:3000` + - (optional) terminal 2 (prometheus UI): `kubectl port-forward -n monitoring service/prometheus-k8s --address 0.0.0.0 9090:9090` + - (optional) terminal 3 (alertmanager UI): `kubectl port-forward -n monitoring service/alertmanager-main --address 0.0.0.0 9093:9093` + +- if using a remote server/VM for running MicroK8s and VSCode, forward ports 3001, 9090, 9093 + +- (only used for internal framework debugging) run manual tests over the performance evaluation framework + - terminal 4: + ``` + export PYTHONPATH=/home/tfs/tfs-ctrl/src + python -m common.method_wrappers.tests + ``` + +- log into grafana: + - browse: http://127.0.0.1:3001 (port 3001 is forwarded to grafana's port 3000 above) + - user/pass: admin/admin + - upload dashboards through "left menu > Dashboards > Manage > Import" + - upload grafana_prometheus_component_rpc.json + - upload grafana_prometheus_device_driver.json + - upload grafana_prometheus_service_handler.json + - watch the dashboards update in real time + +- upload a topology through the WebUI and navigate it + - you should see the histograms changing in Grafana + +## References: +- [Prometheus - Tutorials - Getting Started](https://prometheus.io/docs/tutorials/getting_started/) +- [Prometheus - Tutorials - Understanding metric types](https://prometheus.io/docs/tutorials/understanding_metric_types/) +- [Prometheus - Tutorials - Instrumenting HTTP server in Go](https://prometheus.io/docs/tutorials/instrumenting_http_server_in_go/) +- [Prometheus - Tutorials - Visualizing metrics using Grafana](https://prometheus.io/docs/tutorials/visualizing_metrics_using_grafana/) +- [Prometheus - Tutorials - Alerting based on metrics](https://prometheus.io/docs/tutorials/alerting_based_on_metrics/) +- [Prometheus Operator - Guide](https://www.infracloud.io/blogs/prometheus-operator-helm-guide/) +- [Prometheus Operator - ServiceMonitor
definition](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor) +- [Prometheus Operator - ServiceMonitor example 1](https://stackoverflow.com/questions/45613660/how-do-you-add-scrape-targets-to-a-prometheus-server-that-was-installed-with-kub) +- [Prometheus Operator - ServiceMonitor example 2](https://stackoverflow.com/questions/52991038/how-to-create-a-servicemonitor-for-prometheus-operator) +- [How to visualize Prometheus histograms in Grafana](https://grafana.com/blog/2020/06/23/how-to-visualize-prometheus-histograms-in-grafana/) +- [Prometheus Histograms with Grafana Heatmaps](https://towardsdatascience.com/prometheus-histograms-with-grafana-heatmaps-d556c28612c7) diff --git a/src/common/rpc_method_wrapper/tests/__init__.py b/src/common/method_wrappers/tests/__init__.py similarity index 100% rename from src/common/rpc_method_wrapper/tests/__init__.py rename to src/common/method_wrappers/tests/__init__.py diff --git a/src/common/method_wrappers/tests/__main__.py b/src/common/method_wrappers/tests/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..505991f61ddb955196218230bfd533d8ea6ddf0d --- /dev/null +++ b/src/common/method_wrappers/tests/__main__.py @@ -0,0 +1,35 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, random +from prometheus_client import start_http_server +from .DummyDeviceDriver import DummyDeviceDriver + +logging.basicConfig(level=logging.DEBUG) +LOGGER = logging.getLogger(__name__) + +def main(): + # Start up the server to expose the metrics + start_http_server(8000) + + ddd = DummyDeviceDriver() + while True: + func = random.choice([ddd.get_config, ddd.set_config, ddd.del_config]) + try: + func() + except: # pylint: disable=bare-except + pass # the dummy driver raises on purpose; keep the load generator running + +if __name__ == '__main__': + main() diff --git a/src/common/method_wrappers/tests/deploy_specs.sh b/src/common/method_wrappers/tests/deploy_specs.sh new file mode 100644 index 0000000000000000000000000000000000000000..238918480ae857e64efb52f652b20ab08a21c2df --- /dev/null +++ b/src/common/method_wrappers/tests/deploy_specs.sh @@ -0,0 +1,26 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +# Supported components are: +# context device automation policy service compute monitoring webui +# interdomain slice pathcomp dlt +# dbscanserving opticalattackmitigator opticalattackdetector +# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector +export TFS_COMPONENTS="context device pathcomp service slice webui" # automation monitoring compute + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicemonitors.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="NO" #${TFS_SKIP_BUILD:-"YES"} diff --git a/src/common/method_wrappers/tests/grafana_prometheus_component_rpc.json b/src/common/method_wrappers/tests/grafana_prometheus_component_rpc.json new file mode 100644 index 0000000000000000000000000000000000000000..b5b857e7573264f26289ba9a72ec5444e4ac71a4 --- /dev/null +++ b/src/common/method_wrappers/tests/grafana_prometheus_component_rpc.json @@ -0,0 +1,426 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 25, + "iteration": 1671297223428, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_started_total{pod=~\"[[pod]]\"})", + "interval": "", + "legendFormat": "started", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_completed_total{pod=~\"[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "completed", + "refId": "B" + }, + { + "exemplar": true, + "expr": "sum(tfs_[[component]]_rpc_[[method]]_counter_requests_failed_total{pod=~\"[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "failed", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:935", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:936", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "linear", + "colorScheme": "interpolateRdYlGn", + "exponent": 0.5, +
"max": null, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 6 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 2, + "interval": "60s", + "legend": { + "show": true + }, + "pluginVersion": "7.5.4", + "reverseYBuckets": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(\r\n max_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m]) -\r\n min_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~\"[[pod]]\"}[1m])\r\n) by (le)", + "format": "heatmap", + "instant": false, + "interval": "1m", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Histogram", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": null, + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(tfs_[[component]]_rpc_[[method]]_histogram_duration_sum{pod=~\"[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "total time", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total Exec Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:407", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:408", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "context", + "value": "context" + }, + "datasource": "prometheus", + "definition": "metrics(tfs_)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Component", + "multi": false, + "name": "component", + "options": [], + "query": { + "query": "metrics(tfs_)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "/tfs_(.+)_rpc_.*/", + "skipUrlSync": 
false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": { + "selected": false, + "text": "getcontext", + "value": "getcontext" + }, + "datasource": "prometheus", + "definition": "metrics(tfs_[[component]]_rpc_)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Method", + "multi": false, + "name": "method", + "options": [], + "query": { + "query": "metrics(tfs_[[component]]_rpc_)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "/tfs_[[component]]_rpc_(.+)_histogram_duration_bucket/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": "prometheus", + "definition": "label_values(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket, pod)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "Pod", + "multi": true, + "name": "pod", + "options": [], + "query": { + "query": "label_values(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket, pod)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "TFS / Component RPCs", + "uid": "KKxzxIFVz", + "version": 21 +} \ No newline at end of file diff --git a/src/common/method_wrappers/tests/grafana_prometheus_device_driver.json b/src/common/method_wrappers/tests/grafana_prometheus_device_driver.json new file mode 100644 index 0000000000000000000000000000000000000000..2926a409b3b77b16c4e7b5d86ecd7d56f6acdebc --- /dev/null +++ b/src/common/method_wrappers/tests/grafana_prometheus_device_driver.json @@ -0,0 +1,431 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 26, + "iteration": 1671318718779, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(tfs_device_driver_[[method]]_counter_requests_started_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})", + "interval": "", + "legendFormat": "started", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": 
"sum(tfs_device_driver_[[method]]_counter_requests_completed_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "completed", + "refId": "B" + }, + { + "exemplar": true, + "expr": "sum(tfs_device_driver_[[method]]_counter_requests_failed_total{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "failed", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:864", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:865", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "linear", + "colorScheme": "interpolateRdYlGn", + "exponent": 0.5, + "max": null, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 6 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 2, + "interval": "60s", + "legend": { + "show": true + }, + "pluginVersion": "7.5.4", + "reverseYBuckets": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(\r\n max_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"}[1m]) -\r\n min_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"}[1m])\r\n) by (le)", + "format": "heatmap", + "instant": false, + "interval": "60s", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "title": "Histogram", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": null, + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": 
"sum(tfs_device_driver_[[method]]_histogram_duration_sum{driver=~\"[[driver]]\", pod=~\"deviceservice-[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "total time", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total Exec Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:407", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:408", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": "", + "current": { + "selected": false, + "text": "setconfig", + "value": "setconfig" + }, + "datasource": "prometheus", + "definition": "metrics(tfs_device_driver_.+)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Method", + "multi": false, + "name": "method", + "options": [], + "query": { + "query": "metrics(tfs_device_driver_.+)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "/tfs_device_driver_(.+config)_histogram_duration_bucket/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": "prometheus", + "definition": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, driver)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "Driver", + "multi": true, + "name": "driver", + "options": [], + "query": { + "query": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, driver)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": "prometheus", + "definition": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, pod)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "Pod", + "multi": true, + "name": "pod", + "options": [], + "query": { + "query": "label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, pod)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "/deviceservice-(.*)/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "TFS / Device / Driver", + "uid": "eAg-wsOVk", + "version": 30 +} \ No newline at end of file diff --git a/src/common/method_wrappers/tests/grafana_prometheus_service_handler.json b/src/common/method_wrappers/tests/grafana_prometheus_service_handler.json new file mode 100644 index 0000000000000000000000000000000000000000..48e770afe4bba9c2eb5df76d3532bf35d6cfe192 
--- /dev/null +++ b/src/common/method_wrappers/tests/grafana_prometheus_service_handler.json @@ -0,0 +1,432 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 27, + "iteration": 1671319012315, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(tfs_service_handler_[[method]]_counter_requests_started_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})", + "instant": false, + "interval": "", + "legendFormat": "started", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "sum(tfs_service_handler_[[method]]_counter_requests_completed_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "completed", + "refId": "B" + }, + { + "exemplar": true, + "expr": "sum(tfs_service_handler_[[method]]_counter_requests_failed_total{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "failed", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:935", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:936", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "linear", + "colorScheme": "interpolateRdYlGn", + "exponent": 0.5, + "max": null, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 6 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 2, + "interval": "60s", + "legend": { + "show": true + }, + "pluginVersion": "7.5.4", + "reverseYBuckets": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(\r\n max_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"}[1m]) -\r\n 
min_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"}[1m])\r\n) by (le)", + "format": "heatmap", + "instant": false, + "interval": "1m", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "title": "Histogram", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": null, + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(tfs_service_handler_[[method]]_histogram_duration_sum{handler=~\"[[handler]]\", pod=~\"serviceservice-[[pod]]\"})", + "hide": false, + "interval": "", + "legendFormat": "total time", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total Exec Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:407", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:408", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": "", + "current": { + "selected": false, + "text": "setendpoint", + "value": "setendpoint" + }, + "datasource": "prometheus", + "definition": "metrics(tfs_service_handler_.+)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Method", + "multi": false, + "name": "method", + "options": [], + "query": { + "query": "metrics(tfs_service_handler_.+)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "/tfs_service_handler_(.+)_histogram_duration_bucket/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": "prometheus", + "definition": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, handler)", + "description": null, + "error": null, + "hide": 0, 
+ "includeAll": true, + "label": "Handler", + "multi": true, + "name": "handler", + "options": [], + "query": { + "query": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, handler)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": "prometheus", + "definition": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, pod)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "Pod", + "multi": true, + "name": "pod", + "options": [], + "query": { + "query": "label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, pod)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "/serviceservice-(.*)/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "TFS / Service / Handler", + "uid": "DNOhOIF4k", + "version": 16 +} \ No newline at end of file diff --git a/src/common/method_wrappers/tests/old/docker_grafana.sh b/src/common/method_wrappers/tests/old/docker_grafana.sh new file mode 100644 index 0000000000000000000000000000000000000000..2a1484d5504c69f08b23d652879f4c6bace44548 --- /dev/null +++ b/src/common/method_wrappers/tests/old/docker_grafana.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +docker create -it --name=prometheus -p 9090:9090 \ + -v /home/tfs/tfs-ctrl/test-prom-cli/prometheus.yml:/etc/prometheus/prometheus.yml \ + prom/prometheus + +docker create -it --name=grafana -p 3000:3000 \ + grafana/grafana diff --git a/src/common/method_wrappers/tests/old/prometheus.yml b/src/common/method_wrappers/tests/old/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..af2849209ab75eef57d41d0489bf695baa6d5fde --- /dev/null +++ b/src/common/method_wrappers/tests/old/prometheus.yml @@ -0,0 +1,23 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +global: + scrape_interval: 15s # how often Prometheus pulls data from exporters + evaluation_interval: 30s # time between each evaluation of Prometheus' alerting rules + +scrape_configs: +- job_name: ddd # your project name + static_configs: + - targets: + - 172.17.0.1:8000 diff --git a/src/common/method_wrappers/tests/prometheus_queries.txt b/src/common/method_wrappers/tests/prometheus_queries.txt new file mode 100644 index 0000000000000000000000000000000000000000..9eb69e970aa1daffc0a491734dad5b381051e994 --- /dev/null +++ b/src/common/method_wrappers/tests/prometheus_queries.txt @@ -0,0 +1,66 @@ +TFS/Components: +--------------- + +variables: +name=component + query=metrics(tfs_) + regex=/tfs_(.+)_rpc_.*/ +name=method + query=metrics(tfs_[[component]]_rpc_) + regex=/tfs_[[component]]_rpc_(.+)_histogram_duration_bucket/ + +plots: +tfs_[[component]]_rpc_[[method]]_counter_requests_started_total +tfs_[[component]]_rpc_[[method]]_counter_requests_completed_total +tfs_[[component]]_rpc_[[method]]_counter_requests_failed_total +tfs_[[component]]_rpc_[[method]]_histogram_duration_sum +#sum(increase(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket[$__rate_interval])) by (le) +sum( + max_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~"[[pod]]"}[1m]) - + min_over_time(tfs_[[component]]_rpc_[[method]]_histogram_duration_bucket{pod=~"[[pod]]"}[1m]) +) by (le) + + +TFS/Device/Driver: +------------------ + +variables: +name=method + query=metrics(tfs_device_driver_.+) + regex=/tfs_device_driver_(.+config)_histogram_duration_bucket/ +name=driver + query=label_values(tfs_device_driver_[[method]]_histogram_duration_bucket, driver) + regex= + +plots: +tfs_device_driver_[[method]]_counter_requests_started_total{driver="[[driver]]"} +tfs_device_driver_[[method]]_counter_requests_completed_total{driver="[[driver]]"} +tfs_device_driver_[[method]]_counter_requests_failed_total{driver="[[driver]]"} +tfs_device_driver_[[method]]_histogram_duration_sum{driver="[[driver]]"} +#sum(increase(tfs_device_driver_[[method]]_histogram_duration_bucket{driver="[[driver]]"}[$__rate_interval])) by (le) +sum( + max_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver="[[driver]]", pod=~"deviceservice-[[pod]]"}[1m]) - + min_over_time(tfs_device_driver_[[method]]_histogram_duration_bucket{driver="[[driver]]", pod=~"deviceservice-[[pod]]"}[1m]) +) by (le) + + +TFS/Service/Handler: +-------------------- + +variables: +name=method + query=metrics(tfs_service_handler_.+) + regex=/tfs_service_handler_(.+)_histogram_duration_bucket/ +name=handler + query=label_values(tfs_service_handler_[[method]]_histogram_duration_bucket, handler) + regex= + +plots: +tfs_service_handler_[[method]]_counter_requests_started_total{handler="[[handler]]"} +tfs_service_handler_[[method]]_counter_requests_completed_total{handler="[[handler]]"} +tfs_service_handler_[[method]]_counter_requests_failed_total{handler="[[handler]]"} +tfs_service_handler_[[method]]_histogram_duration_sum{handler="[[handler]]"} +#sum(increase(tfs_service_handler_[[method]]_histogram_duration_bucket{handler="[[handler]]"}[$__rate_interval])) by (le) +sum( + max_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~"[[handler]]", pod=~"serviceservice-[[pod]]"}[1m]) - + min_over_time(tfs_service_handler_[[method]]_histogram_duration_bucket{handler=~"[[handler]]", pod=~"serviceservice-[[pod]]"}[1m]) +) by (le) diff --git a/src/common/rpc_method_wrapper/tests/test_unitary.py b/src/common/method_wrappers/tests/test_unitary.py similarity index
50% rename from src/common/rpc_method_wrapper/tests/test_unitary.py rename to src/common/method_wrappers/tests/test_unitary.py index c8fc7a2aa187dcb905a8c230b047ffb1171d2ccd..95e40f6411f9ced557f79dca8f907d1d44ed0fac 100644 --- a/src/common/rpc_method_wrapper/tests/test_unitary.py +++ b/src/common/method_wrappers/tests/test_unitary.py @@ -13,24 +13,15 @@ # limitations under the License. import grpc, logging, time -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method logging.basicConfig(level=logging.DEBUG) LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('Context', 'RPC') def test_database_instantiation(): - SERVICE_NAME = 'Context' - METHOD_NAMES = [ - 'ListContextIds', 'ListContexts', 'GetContext', 'SetContext', 'RemoveContext', 'GetContextEvents', - 'ListTopologyIds', 'ListTopologies', 'GetTopology', 'SetTopology', 'RemoveTopology', 'GetTopologyEvents', - 'ListDeviceIds', 'ListDevices', 'GetDevice', 'SetDevice', 'RemoveDevice', 'GetDeviceEvents', - 'ListLinkIds', 'ListLinks', 'GetLink', 'SetLink', 'RemoveLink', 'GetLinkEvents', - 'ListServiceIds', 'ListServices', 'GetService', 'SetService', 'RemoveService', 'GetServiceEvents', - ] - METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) - class TestServiceServicerImpl: - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetTopology(self, request, grpc_context : grpc.ServicerContext): print('doing funny things') time.sleep(0.1) @@ -39,6 +30,6 @@ def test_database_instantiation(): tssi = TestServiceServicerImpl() tssi.GetTopology(1, 2) - for metric_name,metric in METRICS.items(): - if 'GETTOPOLOGY_' not in metric_name: continue - print(metric_name, metric._child_samples()) # pylint: disable=protected-access + for metric_name,metric in METRICS_POOL.metrics.items(): + if 'TFS_CONTEXT_RPC_GETTOPOLOGY_' not in metric_name: continue + print(metric_name, metric._child_samples()) # pylint: disable=protected-access diff --git a/src/common/orm/HighLevel.py b/src/common/orm/HighLevel.py index a5bdeae3e9607767b5215f6ff87cb0d8624918d0..3a4b0d6618d4b6ddc8ba185d65b796533a8bbc10 100644 --- a/src/common/orm/HighLevel.py +++ b/src/common/orm/HighLevel.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Any, Dict, List, Optional, Set, Tuple -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.method_wrappers.ServiceExceptions import NotFoundException from common.orm.Database import Database from common.orm.backend.Tools import key_to_str from common.orm.fields.ForeignKeyField import ForeignKeyField diff --git a/src/common/rpc_method_wrapper/Decorator.py b/src/common/rpc_method_wrapper/Decorator.py deleted file mode 100644 index 31dc4b82bdaa8762b1dee5af247b3f8b7b9af2af..0000000000000000000000000000000000000000 --- a/src/common/rpc_method_wrapper/Decorator.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - -import grpc, logging -from enum import Enum -from typing import Dict, List -from prometheus_client import Counter, Histogram -from prometheus_client.metrics import MetricWrapperBase -from common.tools.grpc.Tools import grpc_message_to_json_string -from .ServiceExceptions import ServiceException - -class RequestConditionEnum(Enum): - STARTED = 'started' - COMPLETED = 'completed' - FAILED = 'failed' - -def get_counter_requests(method_name : str, request_condition : RequestConditionEnum) -> Counter: - str_request_condition = request_condition.value - name = '{:s}_counter_requests_{:s}'.format(method_name.replace(':', '_'), str_request_condition) - description = '{:s} counter of requests {:s}'.format(method_name, str_request_condition) - return Counter(name, description) - -def get_histogram_duration(method_name : str) -> Histogram: - name = '{:s}_histogram_duration'.format(method_name.replace(':', '_')) - description = '{:s} histogram of request duration'.format(method_name) - return Histogram(name, description) - -METRIC_TEMPLATES = { - '{:s}_COUNTER_STARTED' : lambda method_name: get_counter_requests (method_name, RequestConditionEnum.STARTED), - '{:s}_COUNTER_COMPLETED' : lambda method_name: get_counter_requests (method_name, RequestConditionEnum.COMPLETED), - '{:s}_COUNTER_FAILED' : lambda method_name: get_counter_requests (method_name, RequestConditionEnum.FAILED), - '{:s}_HISTOGRAM_DURATION': lambda method_name: get_histogram_duration(method_name), -} - -def create_metrics(service_name : str, method_names : List[str]) -> Dict[str, MetricWrapperBase]: - metrics = {} - for method_name in method_names: - for template_name, template_generator_function in METRIC_TEMPLATES.items(): - metric_name = template_name.format(method_name).upper() - metrics[metric_name] = template_generator_function('{:s}:{:s}'.format(service_name, method_name)) - return metrics - -def safe_and_metered_rpc_method(metrics : Dict[str, MetricWrapperBase], logger : logging.Logger): - def outer_wrapper(func): - function_name = func.__name__ - HISTOGRAM_DURATION : Histogram = metrics.get('{:s}_HISTOGRAM_DURATION'.format(function_name).upper()) - COUNTER_STARTED : Counter = metrics.get('{:s}_COUNTER_STARTED' .format(function_name).upper()) - COUNTER_COMPLETED : Counter = metrics.get('{:s}_COUNTER_COMPLETED' .format(function_name).upper()) - COUNTER_FAILED : Counter = metrics.get('{:s}_COUNTER_FAILED' .format(function_name).upper()) - - @HISTOGRAM_DURATION.time() - def inner_wrapper(self, request, grpc_context : grpc.ServicerContext): - COUNTER_STARTED.inc() - try: - logger.debug('{:s} request: {:s}'.format(function_name, grpc_message_to_json_string(request))) - reply = func(self, request, grpc_context) - logger.debug('{:s} reply: {:s}'.format(function_name, grpc_message_to_json_string(reply))) - COUNTER_COMPLETED.inc() - return reply - except ServiceException as e: # pragma: no cover (ServiceException not thrown) - if e.code not in [grpc.StatusCode.NOT_FOUND, grpc.StatusCode.ALREADY_EXISTS]: - # Assume not found or already exists is just a condition, not an error - logger.exception('{:s} exception'.format(function_name)) - COUNTER_FAILED.inc() - grpc_context.abort(e.code, e.details) - except Exception as e: # pragma: no cover, pylint: disable=broad-except - logger.exception('{:s} exception'.format(function_name)) - COUNTER_FAILED.inc() - grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) - return inner_wrapper - 
return outer_wrapper diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py index f14e2caf6065996ea6223449f309e03d141b5954..e3fc86c0538ae71008c15fd41e70f4f0f3c23763 100644 --- a/src/common/tools/descriptor/Loader.py +++ b/src/common/tools/descriptor/Loader.py @@ -250,5 +250,4 @@ class DescriptorLoader: num_ok += 1 except Exception as e: # pylint: disable=broad-except error_list.append(f'{str(entity)}: {str(e)}') - num_err += 1 self.__results.append((entity_name, action_name, num_ok, error_list)) diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py index be8eefe5bc032ad6a45fd54b267db6ab12e3f5b0..0b5ad820c565c50607180e0933795774fd5c2035 100644 --- a/src/common/tools/object_factory/Service.py +++ b/src/common/tools/object_factory/Service.py @@ -42,6 +42,16 @@ def json_service( 'service_config' : {'config_rules': copy.deepcopy(config_rules)}, } +def json_service_l2nm_planned( + service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID + ): + + return json_service( + service_uuid, ServiceTypeEnum.SERVICETYPE_L2NM, context_id=json_context_id(context_uuid), + status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, + config_rules=config_rules) + def json_service_l3nm_planned( service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID diff --git a/src/common/tools/object_factory/Slice.py b/src/common/tools/object_factory/Slice.py index 6ab666aa6ed379eb0b8948b1178aa13069d70bf4..2376784e3237992ab3d18d9d70db41b3a3f23560 100644 --- a/src/common/tools/object_factory/Slice.py +++ b/src/common/tools/object_factory/Slice.py @@ -14,7 +14,9 @@ import copy from typing import Dict, List, Optional +from common.Constants import DEFAULT_CONTEXT_UUID from common.proto.context_pb2 import SliceStatusEnum +from common.tools.object_factory.Context import json_context_id def get_slice_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: return 'slc:{:s}/{:s}=={:s}/{:s}'.format( @@ -30,13 +32,13 @@ def json_slice_owner(owner_uuid : str, owner_string : str) -> Dict: return {'owner_uuid': {'uuid': owner_uuid}, 'owner_string': owner_string} def json_slice( - slice_uuid : str, context_id : Optional[Dict] = None, + slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, status : SliceStatusEnum = SliceStatusEnum.SLICESTATUS_PLANNED, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], config_rules : List[Dict] = [], service_ids : List[Dict] = [], subslice_ids : List[Dict] = [], owner : Optional[Dict] = None): result = { - 'slice_id' : json_slice_id(slice_uuid, context_id=context_id), + 'slice_id' : json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)), 'slice_status' : {'slice_status': status}, 'slice_endpoint_ids': copy.deepcopy(endpoint_ids), 'slice_constraints' : copy.deepcopy(constraints), diff --git a/src/compute/service/ComputeServiceServicerImpl.py b/src/compute/service/ComputeServiceServicerImpl.py index f8ffd912f065ddc11829f8e9e85559b13576a222..a47a1db6c6fd7ffea16ab23be2997e3c929fb68b 100644 --- a/src/compute/service/ComputeServiceServicerImpl.py +++ b/src/compute/service/ComputeServiceServicerImpl.py @@ -13,56 +13,51 @@ # limitations under the License. 
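The servicer changes that follow replace the per-method METRICS dict with a single MetricsPool per component. The Grafana dashboards above discover these metrics purely by name, so the pool presumably derives Prometheus metric names as tfs_<component>_<group>_<method>_<metric>, all lowercased; that is consistent with both the /tfs_(.+)_rpc_.*/ template regex and the TFS_CONTEXT_RPC_GETTOPOLOGY_ key prefix checked in test_unitary.py. MetricsPool's internals are not part of this diff, so the sketch below only illustrates the assumed naming convention:

```
# Hypothetical sketch of the metric-naming convention implied by the dashboard
# queries; the real MetricsPool implementation is not shown in this patch.
def metric_name(component: str, group: str, method: str, metric: str) -> str:
    # e.g. metric_name('Context', 'RPC', 'GetTopology', 'histogram_duration')
    return 'tfs_{:s}_{:s}_{:s}_{:s}'.format(
        component.lower(), group.lower(), method.lower(), metric)

assert metric_name('Context', 'RPC', 'GetTopology', 'histogram_duration') == \
    'tfs_context_rpc_gettopology_histogram_duration'
```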
import grpc, logging +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.proto.context_pb2 import ( AuthenticationResult, Empty, Service, ServiceId, ServiceIdList, ServiceStatus, TeraFlowController) from common.proto.compute_pb2_grpc import ComputeServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Compute' -METHOD_NAMES = [ - 'CheckCredentials', 'GetConnectivityServiceStatus', 'CreateConnectivityService', 'EditConnectivityService', - 'DeleteConnectivityService', 'GetAllActiveConnectivityServices', 'ClearAllConnectivityServices' -] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Compute', 'RPC') class ComputeServiceServicerImpl(ComputeServiceServicer): def __init__(self): LOGGER.info('Creating Servicer...') LOGGER.info('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def CheckCredentials(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: LOGGER.warning('NOT IMPLEMENTED') return AuthenticationResult() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetConnectivityServiceStatus(self, request : ServiceId, context : grpc.ServicerContext) -> ServiceStatus: LOGGER.warning('NOT IMPLEMENTED') return ServiceStatus() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def CreateConnectivityService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: LOGGER.warning('NOT IMPLEMENTED') return ServiceId() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def EditConnectivityService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: LOGGER.warning('NOT IMPLEMENTED') return ServiceId() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DeleteConnectivityService(self, request : Service, context : grpc.ServicerContext) -> Empty: LOGGER.warning('NOT IMPLEMENTED') return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetAllActiveConnectivityServices(self, request : Empty, context : grpc.ServicerContext) -> ServiceIdList: LOGGER.warning('NOT IMPLEMENTED') return ServiceIdList() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ClearAllConnectivityServices(self, request : Empty, context : grpc.ServicerContext) -> Empty: LOGGER.warning('NOT IMPLEMENTED') return Empty() diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index f8dd188198606805e42449c3d690c20d3ad45f03..b7130c7000663791b162bc15d5046d80ed71463d 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -15,6 +15,8 @@ import grpc, json, logging, operator, threading from typing import Iterator, List, Set, Tuple from common.message_broker.MessageBroker import MessageBroker +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.method_wrappers.ServiceExceptions import InvalidArgumentException from common.orm.Database import Database from common.orm.HighLevel import ( 
get_all_objects, get_object, get_or_create_object, get_related_objects, update_or_create_object) @@ -31,8 +33,6 @@ from common.proto.context_pb2 import ( from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) from common.proto.context_pb2_grpc import ContextServiceServicer from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException from common.tools.grpc.Tools import grpc_message_to_json from context.service.database.ConfigModel import update_config from context.service.database.ConnectionModel import ConnectionModel, set_path @@ -56,19 +56,7 @@ from .Constants import ( LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Context' -METHOD_NAMES = [ - 'ListConnectionIds', 'ListConnections', 'GetConnection', 'SetConnection', 'RemoveConnection', 'GetConnectionEvents', - 'ListContextIds', 'ListContexts', 'GetContext', 'SetContext', 'RemoveContext', 'GetContextEvents', - 'ListTopologyIds', 'ListTopologies', 'GetTopology', 'SetTopology', 'RemoveTopology', 'GetTopologyEvents', - 'ListDeviceIds', 'ListDevices', 'GetDevice', 'SetDevice', 'RemoveDevice', 'GetDeviceEvents', - 'ListLinkIds', 'ListLinks', 'GetLink', 'SetLink', 'RemoveLink', 'GetLinkEvents', - 'ListServiceIds', 'ListServices', 'GetService', 'SetService', 'RemoveService', 'GetServiceEvents', - 'ListSliceIds', 'ListSlices', 'GetSlice', 'SetSlice', 'RemoveSlice', 'GetSliceEvents', - 'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule', - 'UnsetService', 'UnsetSlice', -] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Context', 'RPC') class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer): def __init__(self, database : Database, messagebroker : MessageBroker): @@ -81,28 +69,28 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Context ---------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList: with self.lock: db_contexts : List[ContextModel] = get_all_objects(self.database, ContextModel) db_contexts = sorted(db_contexts, key=operator.attrgetter('pk')) return ContextIdList(context_ids=[db_context.dump_id() for db_context in db_contexts]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: with self.lock: db_contexts : List[ContextModel] = get_all_objects(self.database, ContextModel) db_contexts = sorted(db_contexts, key=operator.attrgetter('pk')) return ContextList(contexts=[db_context.dump() for db_context in db_contexts]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: with self.lock: context_uuid = request.context_uuid.uuid db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) return Context(**db_context.dump(include_services=True, include_topologies=True)) - @safe_and_metered_rpc_method(METRICS, LOGGER) + 
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: with self.lock: context_uuid = request.context_id.context_uuid.uuid @@ -140,7 +128,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id}) return ContextId(**dict_context_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: with self.lock: context_uuid = request.context_uuid.uuid @@ -154,7 +142,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): yield ContextEvent(**json.loads(message.content)) @@ -162,7 +150,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Topology --------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: with self.lock: context_uuid = request.context_uuid.uuid @@ -171,7 +159,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_topologies = sorted(db_topologies, key=operator.attrgetter('pk')) return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: with self.lock: context_uuid = request.context_uuid.uuid @@ -180,14 +168,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_topologies = sorted(db_topologies, key=operator.attrgetter('pk')) return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: with self.lock: str_key = key_to_str([request.context_id.context_uuid.uuid, request.topology_uuid.uuid]) db_topology : TopologyModel = get_object(self.database, TopologyModel, str_key) return Topology(**db_topology.dump(include_devices=True, include_links=True)) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId: with self.lock: context_uuid = request.topology_id.context_id.context_uuid.uuid @@ -224,7 +212,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) return TopologyId(**dict_topology_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def 
RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: with self.lock: context_uuid = request.context_id.context_uuid.uuid @@ -239,7 +227,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): yield TopologyEvent(**json.loads(message.content)) @@ -247,21 +235,21 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Device ----------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList: with self.lock: db_devices : List[DeviceModel] = get_all_objects(self.database, DeviceModel) db_devices = sorted(db_devices, key=operator.attrgetter('pk')) return DeviceIdList(device_ids=[db_device.dump_id() for db_device in db_devices]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList: with self.lock: db_devices : List[DeviceModel] = get_all_objects(self.database, DeviceModel) db_devices = sorted(db_devices, key=operator.attrgetter('pk')) return DeviceList(devices=[db_device.dump() for db_device in db_devices]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device: with self.lock: device_uuid = request.device_uuid.uuid @@ -269,7 +257,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer return Device(**db_device.dump( include_config_rules=True, include_drivers=True, include_endpoints=True)) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId: with self.lock: device_uuid = request.device_id.device_uuid.uuid @@ -334,7 +322,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) return DeviceId(**dict_device_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty: with self.lock: device_uuid = request.device_uuid.uuid @@ -349,7 +337,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): yield DeviceEvent(**json.loads(message.content)) @@ -357,28 +345,28 @@ class 
ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Link ------------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList: with self.lock: db_links : List[LinkModel] = get_all_objects(self.database, LinkModel) db_links = sorted(db_links, key=operator.attrgetter('pk')) return LinkIdList(link_ids=[db_link.dump_id() for db_link in db_links]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList: with self.lock: db_links : List[LinkModel] = get_all_objects(self.database, LinkModel) db_links = sorted(db_links, key=operator.attrgetter('pk')) return LinkList(links=[db_link.dump() for db_link in db_links]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link: with self.lock: link_uuid = request.link_uuid.uuid db_link : LinkModel = get_object(self.database, LinkModel, link_uuid) return Link(**db_link.dump()) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId: with self.lock: link_uuid = request.link_id.link_uuid.uuid @@ -423,7 +411,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) return LinkId(**dict_link_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty: with self.lock: link_uuid = request.link_uuid.uuid @@ -438,7 +426,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): yield LinkEvent(**json.loads(message.content)) @@ -446,7 +434,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Service ---------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList: with self.lock: db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) @@ -454,7 +442,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_services = sorted(db_services, key=operator.attrgetter('pk')) return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: with self.lock: db_context : ContextModel = 
get_object(self.database, ContextModel, request.context_uuid.uuid) @@ -462,7 +450,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_services = sorted(db_services, key=operator.attrgetter('pk')) return ServiceList(services=[db_service.dump() for db_service in db_services]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service: with self.lock: str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) @@ -470,7 +458,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer return Service(**db_service.dump( include_endpoint_ids=True, include_constraints=True, include_config_rules=True)) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId: with self.lock: context_uuid = request.service_id.context_id.context_uuid.uuid @@ -530,7 +518,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) return ServiceId(**dict_service_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty: with self.lock: context_uuid = request.context_id.context_uuid.uuid @@ -546,7 +534,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): yield ServiceEvent(**json.loads(message.content)) @@ -554,7 +542,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Slice ---------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList: with self.lock: db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) @@ -562,7 +550,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_slices = sorted(db_slices, key=operator.attrgetter('pk')) return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList: with self.lock: db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) @@ -570,7 +558,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_slices = sorted(db_slices, key=operator.attrgetter('pk')) return SliceList(slices=[db_slice.dump() for db_slice in db_slices]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetSlice(self, request: SliceId, 
context : grpc.ServicerContext) -> Slice: with self.lock: str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid]) @@ -579,7 +567,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer include_endpoint_ids=True, include_constraints=True, include_config_rules=True, include_service_ids=True, include_subslice_ids=True)) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: with self.lock: context_uuid = request.slice_id.context_id.context_uuid.uuid @@ -664,7 +652,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) return SliceId(**dict_slice_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def UnsetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: with self.lock: context_uuid = request.slice_id.context_id.context_uuid.uuid @@ -713,7 +701,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) return SliceId(**dict_slice_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty: with self.lock: context_uuid = request.context_id.context_uuid.uuid @@ -729,7 +717,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT): yield SliceEvent(**json.loads(message.content)) @@ -737,7 +725,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Connection ------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: with self.lock: str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) @@ -746,7 +734,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_connections = sorted(db_connections, key=operator.attrgetter('pk')) return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListConnections(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: with self.lock: str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) @@ -755,13 +743,13 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer db_connections = sorted(db_connections, key=operator.attrgetter('pk')) return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + 
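One review note on the surrounding context lines, which this patch does not touch: ListConnections is annotated as taking a ContextId and returning a ServiceList, yet it reads request.context_id and request.service_uuid (fields of a ServiceId) and builds a ConnectionList. If this method is touched in a follow-up, the intended signature would presumably be:

    # Presumed intended signature (follow-up material, not part of this patch):
    def ListConnections(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionList:
        ...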
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection: with self.lock: db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid) return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True)) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId: with self.lock: connection_uuid = request.connection_id.connection_uuid.uuid @@ -800,7 +788,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) return ConnectionId(**dict_connection_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty: with self.lock: db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False) @@ -814,7 +802,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): yield ConnectionEvent(**json.loads(message.content)) @@ -822,28 +810,28 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Policy ----------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: with self.lock: db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList: with self.lock: db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules]) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: with self.lock: policy_rule_uuid = request.uuid.uuid db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid) return PolicyRule(**db_policy_rule.dump()) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: with self.lock: policy_rule_type = 
request.WhichOneof('policy_rule') @@ -858,7 +846,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) return PolicyRuleId(**dict_policy_id) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty: with self.lock: policy_uuid = request.uuid.uuid diff --git a/src/dbscanserving/service/DbscanServiceServicerImpl.py b/src/dbscanserving/service/DbscanServiceServicerImpl.py index 5560eec1e333c16c0ea980a5af14e856c3909431..b14729d36d403a2b246183751d9ecb077a92caeb 100644 --- a/src/dbscanserving/service/DbscanServiceServicerImpl.py +++ b/src/dbscanserving/service/DbscanServiceServicerImpl.py @@ -14,15 +14,13 @@ import os, grpc, logging from sklearn.cluster import DBSCAN -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from dbscanserving.proto.dbscanserving_pb2 import DetectionRequest, DetectionResponse from dbscanserving.proto.dbscanserving_pb2_grpc import DetectorServicer LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'DbscanServing' -METHOD_NAMES = ['Detect'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('DbscanServing', 'RPC') class DbscanServiceServicerImpl(DetectorServicer): @@ -31,7 +29,7 @@ class DbscanServiceServicerImpl(DetectorServicer): LOGGER.debug('Creating Servicer...') LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def Detect(self, request : DetectionRequest, context : grpc.ServicerContext) -> DetectionResponse: if request.num_samples != len(request.samples): context.set_details("The sample dimension declared does not match with the number of samples received.") diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index d5d44f34ffb69a337b715a0884aea3770b3d3cec..88f49de6fb5c07e39b7efc9d26ccba135f95c929 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -14,6 +14,8 @@ import grpc, json, logging, re from typing import Any, Dict, List, Tuple +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.method_wrappers.ServiceExceptions import InvalidArgumentException, OperationFailedException from common.orm.Database import Database from common.orm.HighLevel import get_object, update_or_create_object from common.orm.backend.Tools import key_to_str @@ -21,8 +23,6 @@ from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig, Dev from common.proto.device_pb2 import MonitoringSettings from common.proto.device_pb2_grpc import DeviceServiceServicer from common.proto.kpi_sample_types_pb2 import KpiSampleType -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, OperationFailedException from common.tools.grpc.Tools import grpc_message_to_json from common.tools.mutex_queues.MutexQueues import MutexQueues from context.client.ContextClient import ContextClient @@ -44,9 +44,7 @@ from .MonitoringLoops import MonitoringLoops LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Device' 
-METHOD_NAMES = ['AddDevice', 'ConfigureDevice', 'DeleteDevice', 'GetInitialConfig', 'MonitorDeviceKpi'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Device', 'RPC') class DeviceServiceServicerImpl(DeviceServiceServicer): def __init__( @@ -60,7 +58,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): self.mutex_queues = MutexQueues() LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def AddDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: device_id = request.device_id device_uuid = device_id.device_uuid.uuid @@ -176,7 +174,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): finally: self.mutex_queues.signal_done(device_uuid) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ConfigureDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: device_id = request.device_id device_uuid = device_id.device_uuid.uuid @@ -243,7 +241,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): self.mutex_queues.signal_done(device_uuid) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DeleteDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: device_uuid = request.device_uuid.uuid @@ -288,7 +286,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): finally: self.mutex_queues.signal_done(device_uuid) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetInitialConfig(self, request : DeviceId, context : grpc.ServicerContext) -> DeviceConfig: device_uuid = request.device_uuid.uuid @@ -303,7 +301,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): finally: self.mutex_queues.signal_done(device_uuid) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def MonitorDeviceKpi(self, request : MonitoringSettings, context : grpc.ServicerContext) -> Empty: kpi_uuid = request.kpi_id.kpi_id.uuid device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid diff --git a/src/device/service/database/DatabaseTools.py b/src/device/service/database/DatabaseTools.py index 4409f078b78b7369702e262ec7e371adcd35a7cd..9d3b712cade921849a5b34be3a837e4f6697b76f 100644 --- a/src/device/service/database/DatabaseTools.py +++ b/src/device/service/database/DatabaseTools.py @@ -14,11 +14,11 @@ import grpc from typing import Any, Dict, Tuple +from common.method_wrappers.ServiceExceptions import InvalidArgumentException from common.orm.Database import Database from common.orm.HighLevel import get_or_create_object, update_or_create_object from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import Device, DeviceId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException from context.client.ContextClient import ContextClient from device.service.driver_api.FilterFields import FilterFieldEnum from .ConfigModel import delete_all_config_rules, grpc_config_rules_to_raw, update_config diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py index 2ee9a10ca93ceead96115528873c8876fadcf8ed..6029ff6604b2525b4509a24a2ec0d6f7c38513d0 100644 --- a/src/device/service/drivers/emulated/EmulatedDriver.py +++ b/src/device/service/drivers/emulated/EmulatedDriver.py @@ -19,6 +19,7 @@ from apscheduler.executors.pool 
import ThreadPoolExecutor from apscheduler.job import Job from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.schedulers.background import BackgroundScheduler +from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF from common.type_checkers.Checkers import chk_float, chk_length, chk_string, chk_type from device.service.database.KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type from device.service.driver_api._Driver import ( @@ -122,6 +123,24 @@ def do_sampling( value = abs(0.95 * waveform + 0.05 * noise) out_samples.put_nowait((timestamp, resource_key, value)) +HISTOGRAM_BUCKETS = ( + # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF + 0.0001, 0.00025, 0.00050, 0.00075, + 0.0010, 0.0025, 0.0050, 0.0075, + 0.0100, 0.0250, 0.0500, 0.0750, + 0.1000, 0.2500, 0.5000, 0.7500, + 1.0000, 2.5000, 5.0000, 7.5000, + 10.0, 25.0, 50.0, 75.0, + 100.0, INF +) +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'emulated'}) +METRICS_POOL.get_or_create('GetInitialConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('GetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('UnsubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) + class EmulatedDriver(_Driver): def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called self.__lock = threading.Lock() @@ -170,10 +189,12 @@ class EmulatedDriver(_Driver): self.__scheduler.shutdown() return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: with self.__lock: return dump_subtree(self.__initial) + @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: chk_type('resources', resource_keys, list) with self.__lock: @@ -197,6 +218,7 @@ class EmulatedDriver(_Driver): results.extend(dump_subtree(resource_node)) return results + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -231,6 +253,7 @@ class EmulatedDriver(_Driver): results.append(True) return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -268,6 +291,7 @@ class EmulatedDriver(_Driver): results.append(True) return results + @metered_subclass_method(METRICS_POOL) def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: chk_type('subscriptions', subscriptions, list) if len(subscriptions) == 0: return [] @@ -305,6 +329,7 @@ class EmulatedDriver(_Driver): results.append(True) return results + @metered_subclass_method(METRICS_POOL) def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: chk_type('subscriptions', subscriptions, list) if len(subscriptions) == 0: return [] diff --git 
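The HISTOGRAM_BUCKETS tuple above deliberately spans 100 microseconds to 100 seconds because driver calls range from in-memory lookups in the emulated driver to slow management-protocol sessions on real hardware. A sketch of how such duration histograms behave with prometheus_client; the metric and label names here are illustrative, not the ones MetricsPool generates:

    # Sketch: a duration histogram with custom buckets (prometheus_client).
    from prometheus_client import Histogram

    DRIVER_HISTOGRAM = Histogram(
        'device_driver_method_duration_seconds', 'Duration of driver method calls',
        labelnames=('driver', 'method'),
        buckets=(0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, float('inf')),
    )

    # Prometheus buckets are cumulative: an observation increments every bucket
    # whose upper bound is >= the value, so a 2.5 ms GetConfig() call lands in
    # le=0.01 and all larger buckets.
    with DRIVER_HISTOGRAM.labels(driver='emulated', method='GetConfig').time():
        pass  # driver call goes here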
a/src/device/service/drivers/microwave/IETFApiDriver.py b/src/device/service/drivers/microwave/IETFApiDriver.py index 4d5ec439f4085575fc0f7fddb228d30dab3010b5..3660eb7195c96eca3d95ed16f665ba6363c3d7b5 100644 --- a/src/device/service/drivers/microwave/IETFApiDriver.py +++ b/src/device/service/drivers/microwave/IETFApiDriver.py @@ -13,7 +13,9 @@ # limitations under the License. import logging, requests, threading +from requests.auth import HTTPBasicAuth from typing import Any, Iterator, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_string, chk_type from device.service.driver_api._Driver import _Driver from . import ALL_RESOURCE_KEYS @@ -21,20 +23,27 @@ from .Tools import create_connectivity_service, find_key, config_getter, delete_ LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'microwave'}) + class IETFApiDriver(_Driver): def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - self.__ietf_root = 'https://' + address + ':' + str(port) + username = settings.get('username') + password = settings.get('password') + self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None + scheme = settings.get('scheme', 'http') + self.__ietf_root = '{:s}://{:s}:{:d}'.format(scheme, address, int(port)) self.__timeout = int(settings.get('timeout', 120)) + self.__node_ids = set(settings.get('node_ids', [])) def Connect(self) -> bool: url = self.__ietf_root + '/nmswebs/restconf/data/ietf-network:networks' with self.__lock: if self.__started.is_set(): return True try: - requests.get(url, timeout=self.__timeout, verify=False) + requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth) except requests.exceptions.Timeout: LOGGER.exception('Timeout connecting {:s}'.format(str(self.__ietf_root))) return False @@ -50,10 +59,12 @@ class IETFApiDriver(_Driver): self.__terminate.set() return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: with self.__lock: return [] + @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: chk_type('resources', resource_keys, list) results = [] @@ -62,9 +73,12 @@ class IETFApiDriver(_Driver): for i, resource_key in enumerate(resource_keys): str_resource_name = 'resource_key[#{:d}]'.format(i) chk_string(str_resource_name, resource_key, allow_empty=False) - results.extend(config_getter(self.__ietf_root, resource_key, self.__timeout)) + results.extend(config_getter( + self.__ietf_root, resource_key, timeout=self.__timeout, auth=self.__auth, + node_ids=self.__node_ids)) return results + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] if len(resources) == 0: @@ -81,10 +95,12 @@ class IETFApiDriver(_Driver): uuid = find_key(resource, 'uuid') data = create_connectivity_service( - self.__ietf_root, self.__timeout, uuid, node_id_src, tp_id_src, node_id_dst, tp_id_dst, vlan_id) + self.__ietf_root, uuid, node_id_src, tp_id_src, node_id_dst, tp_id_dst, vlan_id, + timeout=self.__timeout, auth=self.__auth) results.extend(data) return results + @metered_subclass_method(METRICS_POOL) def 
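The reworked IETFApiDriver constructor above stops hard-coding https and anonymous access: scheme, credentials, timeout, and an optional node filter all come from per-device settings, and HTTPBasicAuth is built only when both username and password are present. A usage sketch; the address, port, and credential values are illustrative:

    # Sketch: settings consumed by the reworked IETFApiDriver constructor.
    settings = {
        'scheme'  : 'https',          # default is 'http' when omitted
        'username': 'admin',          # hypothetical credentials
        'password': 'admin',
        'timeout' : 60,               # seconds; default is 120
        'node_ids': ['node-1'],       # optional endpoint filter; empty = no filter
    }
    driver = IETFApiDriver('10.0.0.1', 8443, **settings)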
DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] if len(resources) == 0: return results @@ -92,13 +108,16 @@ class IETFApiDriver(_Driver): for resource in resources: LOGGER.info('resource = {:s}'.format(str(resource))) uuid = find_key(resource, 'uuid') - results.extend(delete_connectivity_service(self.__ietf_root, self.__timeout, uuid)) + results.extend(delete_connectivity_service( + self.__ietf_root, uuid, timeout=self.__timeout, auth=self.__auth)) return results + @metered_subclass_method(METRICS_POOL) def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: # TODO: IETF API Driver does not support monitoring by now return [False for _ in subscriptions] + @metered_subclass_method(METRICS_POOL) def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: # TODO: IETF API Driver does not support monitoring by now return [False for _ in subscriptions] diff --git a/src/device/service/drivers/microwave/Tools.py b/src/device/service/drivers/microwave/Tools.py index 4f74def4dd6c370a9d2bf07b1fbe85670f5c2956..a91c60af5bf9e7057aa8f3fe9a98947f84859cd3 100644 --- a/src/device/service/drivers/microwave/Tools.py +++ b/src/device/service/drivers/microwave/Tools.py @@ -13,6 +13,8 @@ # limitations under the License. import json, logging, requests +from requests.auth import HTTPBasicAuth +from typing import Optional, Set from device.service.driver_api._Driver import RESOURCE_ENDPOINTS LOGGER = logging.getLogger(__name__) @@ -28,6 +30,8 @@ def find_key(resource, key): return json.loads(resource[1])[key] # this function exports only the endpoints which are not already involved in a microwave physical link +# TODO: improvement: create a Set[Tuple[node_id:str, tp_id:str]] containing the endpoints involved in links +# TODO: exportable endpoints are those not in this set. 
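The TODO above (its sentence continues right after this note) is straightforward to realize. A sketch of the set-based variant; the link field names are assumed from ietf-network-topology and should be aligned with whatever is_exportable_endpoint actually inspects:

    # Sketch of the optimization the TODO describes: build the set of
    # (node-id, tp-id) pairs already used by links once, then test
    # membership in O(1) instead of re-scanning links per endpoint.
    from typing import Set, Tuple

    def get_used_endpoints(links) -> Set[Tuple[str, str]]:
        # Field names assumed from ietf-network-topology; adjust to match
        # what is_exportable_endpoint() actually reads from each link.
        used : Set[Tuple[str, str]] = set()
        for link in links:
            source = link.get('source', {})
            used.add((source.get('source-node'), source.get('source-tp')))
            destination = link.get('destination', {})
            used.add((destination.get('dest-node'), destination.get('dest-tp')))
        return used

    # With this, is_exportable_endpoint(node_id, tp_id, links) reduces to:
    #   (node_id, tp_id) not in used_endpoints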
That will prevent looping through links for every endpoint def is_exportable_endpoint(node, termination_point_id, links): # for each link we check if the endpoint (termination_point_id) is already used by an existing link for link in links: @@ -39,7 +43,10 @@ def is_exportable_endpoint(node, termination_point_id, links): return False return True -def config_getter(root_url, resource_key, timeout): +def config_getter( + root_url : str, resource_key : str, auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None, + node_ids : Set[str] = set() +): # getting endpoints network_name = 'SIAE-ETH-TOPOLOGY' FIELDS = ''.join([ @@ -51,51 +58,53 @@ def config_getter(root_url, resource_key, timeout): url = URL_TEMPLATE.format(root_url, network_name, FIELDS) result = [] - try: - response = requests.get(url, timeout=timeout, verify=False) - except requests.exceptions.Timeout: - LOGGER.exception('Timeout connecting {:s}'.format(url)) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception retrieving {:s}'.format(resource_key)) - result.append((resource_key, e)) - else: - context = json.loads(response.content) - if resource_key == RESOURCE_ENDPOINTS: + if resource_key == RESOURCE_ENDPOINTS: + # getting existing endpoints + try: + response = requests.get(url, timeout=timeout, verify=False, auth=auth) + context = json.loads(response.content) network_instance = context.get('ietf-network:network', {}) links = network_instance.get('ietf-network-topology:link', []) - for sip in network_instance['node']: - tp = sip['ietf-network-topology:termination-point'] - node_id = sip['node-id'] - for te in tp: - tp_id = te['tp-id'] + for node in network_instance['node']: + node_id = node['node-id'] + if len(node_ids) > 0 and node_id not in node_ids: continue + tp_list = node['ietf-network-topology:termination-point'] + for tp in tp_list: + tp_id = tp['tp-id'] if not is_exportable_endpoint(node_id, tp_id, links): continue - resource_key = '/endpoints/endpoint[{:s}:{:s}]'.format(node_id,tp_id) - resource_value = {'uuid': tp_id, 'type': te['ietf-te-topology:te']['name']} + tp_uuid = '{:s}:{:s}'.format(node_id,tp_id) + resource_key = '/endpoints/endpoint[{:s}]'.format(tp_uuid) + resource_value = {'uuid': tp_uuid, 'type': tp['ietf-te-topology:te']['name']} result.append((resource_key, resource_value)) - - # getting created services - url = '{:s}/nmswebs/restconf/data/ietf-eth-tran-service:etht-svc'.format(root_url) - try: - response = requests.get(url, timeout=timeout, verify=False) - except requests.exceptions.Timeout: - LOGGER.exception('Timeout connecting {:s}'.format(url)) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception retrieving {:s}'.format(resource_key)) - result.append((resource_key, e)) + except requests.exceptions.Timeout: + LOGGER.exception('Timeout connecting {:s}'.format(url)) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Exception retrieving/parsing endpoints for {:s}'.format(resource_key)) + result.append((resource_key, e)) else: - context = json.loads(response.content) - if resource_key == RESOURCE_ENDPOINTS: + # getting created services + url = '{:s}/nmswebs/restconf/data/ietf-eth-tran-service:etht-svc'.format(root_url) + try: + response = requests.get(url, timeout=timeout, verify=False, auth=auth) + context = json.loads(response.content) etht_service = context.get('ietf-eth-tran-service:etht-svc', {}) service_instances = etht_service.get('etht-svc-instances', []) for service in service_instances: 
service_name = service['etht-svc-name'] resource_key = '/services/service[{:s}]'.format(service_name) - resource_value = {'uuid': service_name, 'type': service['etht-svc-type']} - result.append((resource_key, resource_value)) + result.append((resource_key, service)) + except requests.exceptions.Timeout: + LOGGER.exception('Timeout connecting {:s}'.format(url)) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Exception retrieving/parsing services for {:s}'.format(resource_key)) + result.append((resource_key, e)) + return result def create_connectivity_service( - root_url, timeout, uuid, node_id_src, tp_id_src, node_id_dst, tp_id_dst, vlan_id): + root_url, uuid, node_id_src, tp_id_src, node_id_dst, tp_id_dst, vlan_id, + auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None +): url = '{:s}/nmswebs/restconf/data/ietf-eth-tran-service:etht-svc'.format(root_url) headers = {'content-type': 'application/json'} @@ -128,7 +137,8 @@ def create_connectivity_service( results = [] try: LOGGER.info('Connectivity service {:s}: {:s}'.format(str(uuid), str(data))) - response = requests.post(url=url, data=json.dumps(data), timeout=timeout, headers=headers, verify=False) + response = requests.post( + url=url, data=json.dumps(data), timeout=timeout, headers=headers, verify=False, auth=auth) LOGGER.info('Microwave Driver response: {:s}'.format(str(response))) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Exception creating ConnectivityService(uuid={:s}, data={:s})'.format(str(uuid), str(data))) @@ -140,12 +150,12 @@ def create_connectivity_service( results.append(response.status_code in HTTP_OK_CODES) return results -def delete_connectivity_service(root_url, timeout, uuid): +def delete_connectivity_service(root_url, uuid, auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None): url = '{:s}/nmswebs/restconf/data/ietf-eth-tran-service:etht-svc/etht-svc-instances={:s}' url = url.format(root_url, uuid) results = [] try: - response = requests.delete(url=url, timeout=timeout, verify=False) + response = requests.delete(url=url, timeout=timeout, verify=False, auth=auth) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Exception deleting ConnectivityService(uuid={:s})'.format(str(uuid))) results.append(e) diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index 9342e650b9fadb21fa1b65fb951a08ae6f066a3c..4aa42b180d9816a9ecdf37a1ec351cb52b9ba41c 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -21,6 +21,7 @@ from apscheduler.job import Job from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.schedulers.background import BackgroundScheduler from ncclient.manager import Manager, connect_ssh +from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF from common.tools.client.RetryDecorator import delay_exponential from common.type_checkers.Checkers import chk_length, chk_string, chk_type, chk_float from device.service.driver_api.Exceptions import UnsupportedResourceKeyException @@ -222,6 +223,24 @@ def edit_config( results[i] = e # if validation fails, store the exception return results +HISTOGRAM_BUCKETS = ( + # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF + 0.0001, 0.00025, 0.00050, 0.00075, + 0.0010, 0.0025, 0.0050, 0.0075, + 0.0100, 0.0250, 0.0500, 0.0750, + 0.1000, 0.2500, 
0.5000, 0.7500, + 1.0000, 2.5000, 5.0000, 7.5000, + 10.0, 25.0, 50.0, 75.0, + 100.0, INF +) +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'openconfig'}) +METRICS_POOL.get_or_create('GetInitialConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('GetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('UnsubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) + class OpenConfigDriver(_Driver): def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called self.__lock = threading.Lock() @@ -260,10 +279,12 @@ class OpenConfigDriver(_Driver): self.__netconf_handler.disconnect() return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: with self.__lock: return [] + @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: chk_type('resources', resource_keys, list) results = [] @@ -284,6 +305,7 @@ class OpenConfigDriver(_Driver): results.append((resource_key, e)) # if validation fails, store the exception return results + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -303,6 +325,7 @@ class OpenConfigDriver(_Driver): results = edit_config(self.__netconf_handler, resources) return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -322,6 +345,7 @@ class OpenConfigDriver(_Driver): results = edit_config(self.__netconf_handler, resources, delete=True) return results + @metered_subclass_method(METRICS_POOL) def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: chk_type('subscriptions', subscriptions, list) if len(subscriptions) == 0: return [] @@ -359,6 +383,7 @@ class OpenConfigDriver(_Driver): results.append(True) return results + @metered_subclass_method(METRICS_POOL) def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: chk_type('subscriptions', subscriptions, list) if len(subscriptions) == 0: return [] diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py index b8ff795fbd9466874b07f1f752fce682ea741111..606bb91ebe5e14804bbdd4f34e7c795c6cfd1b32 100644 --- a/src/device/service/drivers/p4/p4_driver.py +++ b/src/device/service/drivers/p4/p4_driver.py @@ -21,6 +21,7 @@ import json import logging import threading from typing import Any, Iterator, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_type, chk_length, chk_string from .p4_common import matches_ipv4, matches_ipv6, valid_port,\ P4_ATTR_DEV_ID, P4_ATTR_DEV_NAME, P4_ATTR_DEV_VENDOR,\ @@ -40,6 +41,7 @@ except ImportError: LOGGER = logging.getLogger(__name__) 
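All of these pools only matter if the process serves them over HTTP so Prometheus can scrape the pod. A sketch using the stock prometheus_client exporter; the port value is illustrative and must match whatever the component's Service and scrape configuration expose, and the real TeraFlow service bootstrap may wire this differently:

    # Sketch: expose the collected metrics for scraping.
    from prometheus_client import start_http_server

    METRICS_PORT = 9192  # illustrative; must match the port the Service exposes

    def start_metrics_exporter() -> None:
        # Serves GET /metrics from the default registry in a background thread.
        start_http_server(METRICS_PORT)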
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'p4'}) class P4Driver(_Driver): """ @@ -158,6 +160,7 @@ class P4Driver(_Driver): return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: """ Retrieve the initial configuration of a P4 device. @@ -172,6 +175,7 @@ class P4Driver(_Driver): self.__endpoint) return [] + @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys: List[str] = [])\ -> List[Tuple[str, Union[Any, None, Exception]]]: """ @@ -199,6 +203,7 @@ class P4Driver(_Driver): with self.__lock: return self.__get_resources(resource_keys) + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources: List[Tuple[str, Any]])\ -> List[Union[bool, Exception]]: """ @@ -222,6 +227,7 @@ class P4Driver(_Driver): with self.__lock: return self.__set_resources(resources) + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources: List[Tuple[str, Any]])\ -> List[Union[bool, Exception]]: """ @@ -268,6 +274,7 @@ class P4Driver(_Driver): LOGGER.warning("GetState() RPC not yet implemented by the P4 driver") return [] + @metered_subclass_method(METRICS_POOL) def SubscribeState(self, subscriptions: List[Tuple[str, float, float]])\ -> List[Union[bool, Exception]]: """ @@ -280,6 +287,7 @@ class P4Driver(_Driver): "SubscribeState() RPC not yet implemented by the P4 driver") return [False for _ in subscriptions] + @metered_subclass_method(METRICS_POOL) def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]])\ -> List[Union[bool, Exception]]: """ diff --git a/src/device/service/drivers/transport_api/Tools.py b/src/device/service/drivers/transport_api/Tools.py index 6ae928eb8f6bf8371dbf28b7ee9cc1995b3c191f..8989294194203d384348f4d2499252555fcb9aaa 100644 --- a/src/device/service/drivers/transport_api/Tools.py +++ b/src/device/service/drivers/transport_api/Tools.py @@ -47,7 +47,15 @@ def config_getter(root_url, resource_key, timeout): elif 'context' in context: context = context['context'] for sip in context['service-interface-point']: - endpoint_type = sip.get('layer-protocol-name', '10Gbps') + layer_protocol_name = sip.get('layer-protocol-name', '?') + supportable_spectrum = sip.get('tapi-photonic-media:media-channel-service-interface-point-spec', {}) + supportable_spectrum = supportable_spectrum.get('mc-pool', {}) + supportable_spectrum = supportable_spectrum.get('supportable-spectrum', []) + supportable_spectrum = supportable_spectrum[0] if len(supportable_spectrum) == 1 else {} + grid_type = supportable_spectrum.get('frequency-constraint', {}).get('grid-type') + granularity = supportable_spectrum.get('frequency-constraint', {}).get('adjustment-granularity') + direction = sip.get('direction', '?') + endpoint_type = ':'.join([layer_protocol_name, grid_type, granularity, direction]) endpoint_url = '/endpoints/endpoint[{:s}]'.format(sip['uuid']) endpoint_data = {'uuid': sip['uuid'], 'type': endpoint_type} result.append((endpoint_url, endpoint_data)) diff --git a/src/device/service/drivers/transport_api/TransportApiDriver.py b/src/device/service/drivers/transport_api/TransportApiDriver.py index b0ecfe32f63b2568d7b5fb7498e532935aee018c..71d7aa33678cb945443565e1766de3234d947ef8 100644 --- a/src/device/service/drivers/transport_api/TransportApiDriver.py +++ b/src/device/service/drivers/transport_api/TransportApiDriver.py @@ -14,6 +14,7 @@ import logging, requests, threading from typing import Any, Iterator, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import 
MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_string, chk_type from device.service.driver_api._Driver import _Driver from . import ALL_RESOURCE_KEYS @@ -21,6 +22,8 @@ from .Tools import create_connectivity_service, find_key, config_getter, delete_ LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'transport_api'}) + class TransportApiDriver(_Driver): def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called self.__lock = threading.Lock() @@ -50,10 +53,12 @@ class TransportApiDriver(_Driver): self.__terminate.set() return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: with self.__lock: return [] + @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: chk_type('resources', resource_keys, list) results = [] @@ -65,6 +70,7 @@ class TransportApiDriver(_Driver): results.extend(config_getter(self.__tapi_root, resource_key, self.__timeout)) return results + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] if len(resources) == 0: @@ -88,6 +94,7 @@ class TransportApiDriver(_Driver): results.extend(data) return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] if len(resources) == 0: return results @@ -98,10 +105,12 @@ class TransportApiDriver(_Driver): results.extend(delete_connectivity_service(self.__tapi_root, self.__timeout, uuid)) return results + @metered_subclass_method(METRICS_POOL) def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: # TODO: TAPI does not support monitoring by now return [False for _ in subscriptions] + @metered_subclass_method(METRICS_POOL) def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: # TODO: TAPI does not support monitoring by now return [False for _ in subscriptions] diff --git a/src/device/service/drivers/xr/XrDriver.py b/src/device/service/drivers/xr/XrDriver.py index 51fd29ad11af5ccdad7e5c49e7d069a1bf2e8ffb..1c1ee7d865715c7455969745987378f825aafbcb 100644 --- a/src/device/service/drivers/xr/XrDriver.py +++ b/src/device/service/drivers/xr/XrDriver.py @@ -18,6 +18,7 @@ import threading import json from typing import Any, Iterator, List, Optional, Tuple, Union import urllib3 +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_type from device.service.driver_api._Driver import _Driver from .cm.cm_connection import CmConnection @@ -29,6 +30,8 @@ urllib3.disable_warnings() LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'xr'}) + class XrDriver(_Driver): def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called self.__lock = threading.Lock() @@ -74,6 +77,7 @@ class XrDriver(_Driver): return [] #pylint: disable=dangerous-default-value + @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: LOGGER.info(f"GetConfig[{self}]: {resource_keys=}") chk_type('resources', resource_keys, list) @@ -89,6 +93,7 @@ class XrDriver(_Driver): else: return [] + 
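One caveat on the transport_api Tools.py hunk above: grid_type and granularity are read with .get('grid-type') and .get('adjustment-granularity') without defaults, so both are None for any service-interface-point that lacks a photonic-media mc-pool, and ':'.join() raises TypeError on None. A defensive variant worth folding in; same field names as the hunk:

    # Sketch: same endpoint_type composition, but never feeds None into join().
    frequency_constraint = supportable_spectrum.get('frequency-constraint', {})
    grid_type   = frequency_constraint.get('grid-type', '?')
    granularity = frequency_constraint.get('adjustment-granularity', '?')
    direction   = sip.get('direction', '?')
    endpoint_type = ':'.join([layer_protocol_name, grid_type, granularity, direction])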
@metered_subclass_method(METRICS_POOL) def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: LOGGER.info(f"SetConfig[{self}]: {resources=}") # Logged config seems like: @@ -116,6 +121,7 @@ class XrDriver(_Driver): return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: LOGGER.info(f"DeleteConfig[{self}]: {resources=}") @@ -156,10 +162,12 @@ class XrDriver(_Driver): return results + @metered_subclass_method(METRICS_POOL) def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: # Not supported return [False for _ in subscriptions] + @metered_subclass_method(METRICS_POOL) def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: # Not supported return [False for _ in subscriptions] diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py index 6c5401cb1724f8a759001d790e835ab78ce4c6c6..9af1ae6ead6fa66722e9d92d96ed07e2731c5ab4 100644 --- a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py +++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py @@ -13,11 +13,12 @@ # limitations under the License. import grpc, logging -from common.proto.context_pb2 import DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId +from typing import Optional +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.proto.context_pb2 import Empty, TopologyId from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer from common.proto.dlt_gateway_pb2 import DltRecord, DltRecordId, DltRecordOperationEnum, DltRecordTypeEnum -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from dlt.connector.client.DltGatewayClient import DltGatewayClient @@ -25,137 +26,122 @@ from .tools.Checkers import record_exists LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'DltConnector' -METHOD_NAMES = [ - 'RecordAll', - 'RecordAllDevices', 'RecordDevice', - 'RecordAllLinks', 'RecordLink', - 'RecordAllServices', 'RecordService', - 'RecordAllSlices', 'RecordSlice', -] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('DltConnector', 'RPC') class DltConnectorServiceServicerImpl(DltConnectorServiceServicer): def __init__(self): LOGGER.debug('Creating Servicer...') LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordAll(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordAllDevices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordDevice(self, request : DltDeviceId, context : grpc.ServicerContext) -> Empty: - context_client = ContextClient() - device = context_client.GetDevice(request.device_id) - - dltgateway_client = DltGatewayClient() - - dlt_record_id = DltRecordId() - dlt_record_id.domain_uuid.uuid = 
request.topology_id.topology_uuid.uuid - dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_DEVICE - dlt_record_id.record_uuid.uuid = device.device_id.device_uuid.uuid - - LOGGER.info('[RecordDevice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) - dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) - LOGGER.info('[RecordDevice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) - - exists = record_exists(dlt_record) - LOGGER.info('[RecordDevice] exists = {:s}'.format(str(exists))) - - dlt_record = DltRecord() - dlt_record.record_id.CopyFrom(dlt_record_id) - dlt_record.operation = \ - DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ - if exists else \ - DltRecordOperationEnum.DLTRECORDOPERATION_ADD - - dlt_record.data_json = grpc_message_to_json_string(device) - LOGGER.info('[RecordDevice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) - dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) - LOGGER.info('[RecordDevice] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) + data_json = None + if not request.delete: + context_client = ContextClient() + device = context_client.GetDevice(request.device_id) + data_json = grpc_message_to_json_string(device) + + self._record_entity( + request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_DEVICE, + request.device_id.device_uuid.uuid, request.delete, data_json) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordAllLinks(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordLink(self, request : DltLinkId, context : grpc.ServicerContext) -> Empty: - context_client = ContextClient() - link = context_client.GetLink(request.link_id) - - dltgateway_client = DltGatewayClient() - - dlt_record_id = DltRecordId() - dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid - dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_LINK - dlt_record_id.record_uuid.uuid = link.link_id.link_uuid.uuid - - LOGGER.info('[RecordLink] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) - dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) - LOGGER.info('[RecordLink] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) - - exists = record_exists(dlt_record) - LOGGER.info('[RecordLink] exists = {:s}'.format(str(exists))) - - dlt_record = DltRecord() - dlt_record.record_id.CopyFrom(dlt_record_id) - dlt_record.operation = \ - DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ - if exists else \ - DltRecordOperationEnum.DLTRECORDOPERATION_ADD - - dlt_record.data_json = grpc_message_to_json_string(link) - LOGGER.info('[RecordLink] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) - dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) - LOGGER.info('[RecordLink] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) + data_json = None + if not request.delete: + context_client = ContextClient() + link = context_client.GetLink(request.link_id) + data_json = grpc_message_to_json_string(link) + + self._record_entity( + request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_LINK, + request.link_id.link_uuid.uuid, request.delete, data_json) return Empty() - 
@safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordAllServices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordService(self, request : DltServiceId, context : grpc.ServicerContext) -> Empty: + data_json = None + if not request.delete: + context_client = ContextClient() + service = context_client.GetService(request.service_id) + data_json = grpc_message_to_json_string(service) + + self._record_entity( + request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_SERVICE, + request.service_id.service_uuid.uuid, request.delete, data_json) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordAllSlices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecordSlice(self, request : DltSliceId, context : grpc.ServicerContext) -> Empty: - context_client = ContextClient() - slice_ = context_client.GetSlice(request.slice_id) + data_json = None + if not request.delete: + context_client = ContextClient() + slice_ = context_client.GetSlice(request.slice_id) + data_json = grpc_message_to_json_string(slice_) + + self._record_entity( + request.topology_id.topology_uuid.uuid, DltRecordTypeEnum.DLTRECORDTYPE_SLICE, + request.slice_id.slice_uuid.uuid, request.delete, data_json) + return Empty() + def _record_entity( + self, dlt_domain_uuid : str, dlt_record_type : DltRecordTypeEnum, dlt_record_uuid : str, delete : bool, + data_json : Optional[str] = None + ) -> None: dltgateway_client = DltGatewayClient() dlt_record_id = DltRecordId() - dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid - dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_SLICE - dlt_record_id.record_uuid.uuid = slice_.slice_id.slice_uuid.uuid + dlt_record_id.domain_uuid.uuid = dlt_domain_uuid # pylint: disable=no-member + dlt_record_id.type = dlt_record_type + dlt_record_id.record_uuid.uuid = dlt_record_uuid # pylint: disable=no-member - LOGGER.info('[RecordSlice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) + str_dlt_record_id = grpc_message_to_json_string(dlt_record_id) + LOGGER.debug('[_record_entity] sent dlt_record_id = {:s}'.format(str_dlt_record_id)) dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) - LOGGER.info('[RecordSlice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + str_dlt_record = grpc_message_to_json_string(dlt_record) + LOGGER.debug('[_record_entity] recv dlt_record = {:s}'.format(str_dlt_record)) exists = record_exists(dlt_record) - LOGGER.info('[RecordSlice] exists = {:s}'.format(str(exists))) + LOGGER.debug('[_record_entity] exists = {:s}'.format(str(exists))) dlt_record = DltRecord() - dlt_record.record_id.CopyFrom(dlt_record_id) - dlt_record.operation = \ - DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ - if exists else \ - DltRecordOperationEnum.DLTRECORDOPERATION_ADD - - dlt_record.data_json = grpc_message_to_json_string(slice_) - LOGGER.info('[RecordSlice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + dlt_record.record_id.CopyFrom(dlt_record_id) # pylint: disable=no-member + if delete and exists: + dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_DELETE + 
elif not delete and exists: + dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE + if data_json is None: raise Exception('data_json must be provided when updating') + dlt_record.data_json = data_json + elif not delete and not exists: + dlt_record.operation = DltRecordOperationEnum.DLTRECORDOPERATION_ADD + if data_json is None: raise Exception('data_json must be provided when adding') + dlt_record.data_json = data_json + else: + return + + str_dlt_record = grpc_message_to_json_string(dlt_record) + LOGGER.debug('[_record_entity] sent dlt_record = {:s}'.format(str_dlt_record)) dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) - LOGGER.info('[RecordSlice] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) - return Empty() + str_dlt_record_status = grpc_message_to_json_string(dlt_record_status) + LOGGER.debug('[_record_entity] recv dlt_record_status = {:s}'.format(str_dlt_record_status)) diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py index a178095aeee81c3e6407cf1c6706b047fd1c65fc..3fb3e72b9b7e2f4c8ea03f0bb2dfce76ac4fcbcd 100644 --- a/src/interdomain/service/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/InterdomainServiceServicerImpl.py @@ -16,7 +16,7 @@ import grpc, logging, uuid from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.tools.context_queries.Context import create_context from common.tools.context_queries.InterDomain import ( compute_interdomain_path, compute_traversed_domains, get_local_device_uuids, is_inter_domain, is_multi_domain) @@ -33,9 +33,7 @@ from .Tools import compose_slice, compute_slice_owner, map_abstract_endpoints_to LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Interdomain' -METHOD_NAMES = ['RequestSlice', 'Authenticate', 'LookUpSlice', 'OrderSliceFromCatalog', 'CreateSliceAndAddToCatalog'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Interdomain', 'RPC') class InterdomainServiceServicerImpl(InterdomainServiceServicer): def __init__(self, remote_domain_clients : RemoteDomainClients): @@ -43,7 +41,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): self.remote_domain_clients = remote_domain_clients LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: context_client = ContextClient() pathcomp_client = PathCompClient() @@ -133,14 +131,14 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): slice_id = context_client.SetSlice(reply) return slice_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: auth_result = AuthenticationResult() auth_result.context_id.CopyFrom(request.context_id) # pylint: disable=no-member auth_result.authenticated = True return auth_result - @safe_and_metered_rpc_method(METRICS, 
LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def LookUpSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: try: context_client = ContextClient() @@ -150,12 +148,12 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): #LOGGER.exception('Unable to get slice({:s})'.format(grpc_message_to_json_string(request.slice_id))) return SliceId() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def OrderSliceFromCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: raise NotImplementedError('OrderSliceFromCatalog') #return Slice() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: context_client = ContextClient() slice_client = SliceClient() diff --git a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py index 01ba90ef5a6cb098e6d419fa0d6abb450893f8c6..f3818578186360365e3b828810d942def5722cea 100644 --- a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py @@ -13,7 +13,7 @@ # limitations under the License. import grpc, logging -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.proto.context_pb2 import ( AuthenticationResult, Slice, SliceId, SliceStatus, SliceStatusEnum, TeraFlowController) from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer @@ -24,9 +24,7 @@ from slice.client.SliceClient import SliceClient LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Interdomain' -METHOD_NAMES = ['RequestSlice', 'Authenticate', 'LookUpSlice', 'OrderSliceFromCatalog', 'CreateSliceAndAddToCatalog'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Interdomain', 'RPC') class InterdomainServiceServicerImpl(InterdomainServiceServicer): def __init__(self, remote_domain_clients : RemoteDomainClients): @@ -34,7 +32,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): self.remote_domain_clients = remote_domain_clients LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: context_client = ContextClient() slice_client = SliceClient() @@ -121,14 +119,14 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): context_client.SetSlice(reply) return reply.slice_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: auth_result = AuthenticationResult() auth_result.context_id.CopyFrom(request.context_id) # pylint: disable=no-member auth_result.authenticated = True return auth_result - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def LookUpSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: try: context_client = ContextClient() @@ -138,12 +136,12 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): #LOGGER.exception('Unable to get 
slice({:s})'.format(grpc_message_to_json_string(request.slice_id))) return SliceId() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def OrderSliceFromCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: raise NotImplementedError('OrderSliceFromCatalog') #return Slice() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: context_client = ContextClient() slice_client = SliceClient() diff --git a/src/monitoring/service/EventTools.py b/src/monitoring/service/EventTools.py index 4999d2a95991d79ed5417948e220d35aa668c653..189e78ce617c69dc4514e9e0b713dece10ef9669 100644 --- a/src/monitoring/service/EventTools.py +++ b/src/monitoring/service/EventTools.py @@ -17,7 +17,7 @@ from queue import Queue import grpc -from common.rpc_method_wrapper.ServiceExceptions import ServiceException +from common.method_wrappers.ServiceExceptions import ServiceException from context.client.ContextClient import ContextClient from common.proto.context_pb2 import Empty, EventTypeEnum diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py index 548f34c8a07a1d8df17f2702879dbbadf60f6d13..c2bceefd794e3c5bd6acb35e41cef78dc1c205e9 100644 --- a/src/monitoring/service/MonitoringServiceServicerImpl.py +++ b/src/monitoring/service/MonitoringServiceServicerImpl.py @@ -27,7 +27,7 @@ from common.proto.monitoring_pb2_grpc import MonitoringServiceServicer from common.proto.monitoring_pb2 import AlarmResponse, AlarmDescriptor, AlarmList, SubsList, KpiId, \ KpiDescriptor, KpiList, KpiQuery, SubsDescriptor, SubscriptionID, AlarmID, KpiDescriptorList, \ MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse, RawKpiTable, RawKpi, RawKpiList -from common.rpc_method_wrapper.ServiceExceptions import ServiceException +from common.method_wrappers.ServiceExceptions import ServiceException from common.tools.timestamp.Converters import timestamp_string_to_float, timestamp_utcnow_to_float from monitoring.service import ManagementDBTools, MetricsDBTools diff --git a/src/opticalattackmitigator/service/OpticalAttackMitigatorServiceServicerImpl.py b/src/opticalattackmitigator/service/OpticalAttackMitigatorServiceServicerImpl.py index 4a2dd041b52eaf89bda65acb7ae1e46beed8c48a..39a783ac40218930b1c08bd0a1cf55788ce7f0b9 100644 --- a/src/opticalattackmitigator/service/OpticalAttackMitigatorServiceServicerImpl.py +++ b/src/opticalattackmitigator/service/OpticalAttackMitigatorServiceServicerImpl.py @@ -14,16 +14,14 @@ import os, grpc, logging, random from influxdb import InfluxDBClient -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from opticalattackmitigator.proto.optical_attack_mitigator_pb2_grpc import ( AttackMitigatorServicer) from opticalattackmitigator.proto.optical_attack_mitigator_pb2 import AttackDescription, AttackResponse LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'OpticalAttackMitigator' -METHOD_NAMES = ['NotifyAttack'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('OpticalAttackMitigator', 'RPC') class OpticalAttackMitigatorServiceServicerImpl(AttackMitigatorServicer): @@ -32,7 +30,7 @@ class OpticalAttackMitigatorServiceServicerImpl(AttackMitigatorServicer): LOGGER.debug('Creating 
Servicer...') LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def NotifyAttack(self, request : AttackDescription, context : grpc.ServicerContext) -> AttackResponse: LOGGER.debug(f"NotifyAttack: {request}") response: AttackResponse = AttackResponse() diff --git a/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py b/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py index d4c71476f016081f7d230a3cfe87e73b35654987..0009f8d9128983b412ecadb3f1011faa0d29dd88 100644 --- a/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py +++ b/src/opticalcentralizedattackdetector/service/OpticalCentralizedAttackDetectorServiceServicerImpl.py @@ -14,7 +14,7 @@ import os, grpc, logging, random from influxdb import InfluxDBClient -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from context.client.ContextClient import ContextClient from monitoring.client.MonitoringClient import MonitoringClient from service.client.ServiceClient import ServiceClient @@ -37,9 +37,7 @@ from opticalcentralizedattackdetector.Config import ( LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'OpticalCentralizedAttackDetector' -METHOD_NAMES = ['NotifyServiceUpdate', 'DetectAttack', 'ReportSummarizedKpi', 'ReportKpi'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('OpticalCentralizedAttackDetector', 'RPC') INFLUXDB_HOSTNAME = os.environ.get("INFLUXDB_HOSTNAME") INFLUXDB_USER = os.environ.get("INFLUXDB_USER") @@ -63,11 +61,11 @@ class OpticalCentralizedAttackDetectorServiceServicerImpl(OpticalCentralizedAtta LOGGER.debug('Creating Servicer...') LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def NotifyServiceUpdate(self, request : Service, context : grpc.ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DetectAttack(self, request : Empty, context : grpc.ServicerContext) -> Empty: # retrieve list with current contexts @@ -131,10 +129,10 @@ class OpticalCentralizedAttackDetectorServiceServicerImpl(OpticalCentralizedAtta # if attack is detected, run the attack mitigator return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ReportSummarizedKpi(self, request : KpiList, context : grpc.ServicerContext) -> Empty: return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ReportKpi(self, request : KpiList, context : grpc.ServicerContext) -> Empty: return Empty() diff --git a/src/pathcomp/backend/pathComp_tools.h b/src/pathcomp/backend/pathComp_tools.h index 8fe704c3932c219e0f04046fcc62d6f1da5f9b66..adbbf30c4fda48564c126369b0aace839cdf5d93 100644 --- a/src/pathcomp/backend/pathComp_tools.h +++ b/src/pathcomp/backend/pathComp_tools.h @@ -121,7 +121,7 @@ struct map_nodes_t { }; #define MAX_NUM_VERTICES 20 // 100 # LGR: reduced from 100 to 20 to divide by 5 the memory used -#define MAX_NUM_EDGES 10 // 100 # LGR: reduced from 100 to 10 to divide by 10 the memory used +#define MAX_NUM_EDGES 20 // 100 # LGR: reduced from 100 to 20 to divide by 5 the memory 
used // Structures for the graph composition struct targetNodes_t { // remote / targeted node diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py index 205306d0ec2d156a2050d1f95c5c1e990796e018..ca4132754fc4886704cb2984519ebc21a19bfd9c 100644 --- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py +++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import grpc, logging +import grpc, logging, threading from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.proto.context_pb2 import ContextId, Empty from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.tools.context_queries.Device import get_devices_in_topology from common.tools.context_queries.Link import get_links_in_topology from common.tools.context_queries.InterDomain import is_inter_domain @@ -28,20 +28,19 @@ from pathcomp.frontend.service.algorithms.Factory import get_algorithm LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'PathComp' -METHOD_NAMES = ['Compute'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('PathComp', 'RPC') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) class PathCompServiceServicerImpl(PathCompServiceServicer): def __init__(self) -> None: LOGGER.debug('Creating Servicer...') + self._lock = threading.Lock() LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def Compute(self, request : PathCompRequest, context : grpc.ServicerContext) -> PathCompReply: - LOGGER.info('[Compute] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) + LOGGER.debug('[Compute] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) context_client = ContextClient() @@ -68,8 +67,10 @@ class PathCompServiceServicerImpl(PathCompServiceServicer): #import time #ts = time.time() #algorithm.execute('request-{:f}.json'.format(ts), 'reply-{:f}.json'.format(ts)) - algorithm.execute() + with self._lock: + # ensure backend receives requests one at a time + algorithm.execute() reply = algorithm.get_reply() - LOGGER.info('[Compute] end ; reply = {:s}'.format(grpc_message_to_json_string(reply))) + LOGGER.debug('[Compute] end ; reply = {:s}'.format(grpc_message_to_json_string(reply))) return reply diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index 3833642457bc5f8c2ba7b7d09f384a87dfabe41d..a24ef769313c7d71d28e6bcc5526cbc398e05c08 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -93,22 +93,22 @@ class _Algorithm: def execute(self, dump_request_filename : Optional[str] = None, dump_reply_filename : Optional[str] = None) -> None: request = {'serviceList': self.service_list, 'deviceList': self.device_list, 'linkList': self.link_list} - self.logger.info('[execute] request={:s}'.format(str(request))) + self.logger.debug('[execute] request={:s}'.format(str(request))) if dump_request_filename is not 
None: with open(dump_request_filename, 'w', encoding='UTF-8') as f: f.write(json.dumps(request, sort_keys=True, indent=4)) - self.logger.info('[execute] BACKEND_URL: {:s}'.format(str(BACKEND_URL))) + self.logger.debug('[execute] BACKEND_URL: {:s}'.format(str(BACKEND_URL))) reply = requests.post(BACKEND_URL, json=request) self.status_code = reply.status_code self.raw_reply = reply.content.decode('UTF-8') - self.logger.info('[execute] status_code={:s} reply={:s}'.format(str(reply.status_code), str(self.raw_reply))) + self.logger.debug('[execute] status_code={:s} reply={:s}'.format(str(reply.status_code), str(self.raw_reply))) if dump_reply_filename is not None: with open(dump_reply_filename, 'w', encoding='UTF-8') as f: f.write('status_code={:s} reply={:s}'.format(str(self.status_code), str(self.raw_reply))) - if reply.status_code not in {requests.codes.ok}: + if reply.status_code not in {requests.codes.ok}: # pylint: disable=no-member raise Exception('Backend error({:s}) for request({:s})'.format( str(self.raw_reply), json.dumps(request, sort_keys=True))) diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 71fe14f53395e2ac57884911fe846c9c1b2c2834..bf152027037af46283c0901f0701ffb83d8a508e 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -14,11 +14,11 @@ import grpc, json, logging from typing import Optional +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.method_wrappers.ServiceExceptions import AlreadyExistsException, InvalidArgumentException from common.proto.context_pb2 import Empty, Service, ServiceId, ServiceStatusEnum from common.proto.pathcomp_pb2 import PathCompRequest from common.proto.service_pb2_grpc import ServiceServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import AlreadyExistsException, InvalidArgumentException from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string from context.client.ContextClient import ContextClient from pathcomp.frontend.client.PathCompClient import PathCompClient @@ -28,9 +28,7 @@ from .task_scheduler.TaskScheduler import TasksScheduler LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Service' -METHOD_NAMES = ['CreateService', 'UpdateService', 'DeleteService'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Service', 'RPC') class ServiceServiceServicerImpl(ServiceServiceServicer): def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None: @@ -38,7 +36,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): self.service_handler_factory = service_handler_factory LOGGER.debug('Servicer Created') - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def CreateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: LOGGER.info('[CreateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) @@ -89,7 +87,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): service_id = context_client.SetService(request) return service_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def UpdateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: LOGGER.info('[UpdateService] begin ; request = 
{:s}'.format(grpc_message_to_json_string(request))) @@ -118,9 +116,10 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): else: pathcomp_request.k_disjoint_path.num_disjoint = num_disjoint_paths - pathcomp = PathCompClient() LOGGER.info('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) + pathcomp = PathCompClient() pathcomp_reply = pathcomp.Compute(pathcomp_request) + pathcomp.close() LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) # Feed TaskScheduler with this path computation reply. TaskScheduler identifies inter-dependencies among @@ -131,7 +130,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): tasks_scheduler.execute_all() return request.service_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DeleteService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[DeleteService] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) diff --git a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py index 19deabda3c8ddcd9f252098570ec07f82bef65a7..bc628c160eaaa9ac282c81bd4c0e02536e88a80c 100644 --- a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py +++ b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py @@ -14,6 +14,7 @@ import anytree, json, logging from typing import Any, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_length, chk_type @@ -24,6 +25,23 @@ from .ConfigRules import setup_config_rules, teardown_config_rules LOGGER = logging.getLogger(__name__) +HISTOGRAM_BUCKETS = ( + # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF + 0.0010, 0.0025, 0.0050, 0.0075, + 0.0100, 0.0250, 0.0500, 0.0750, + 0.1000, 0.2500, 0.5000, 0.7500, + 1.0000, 2.5000, 5.0000, 7.5000, + 10.0000, 25.000, 50.0000, 75.000, + 100.0, INF +) +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l2nm_emulated'}) +METRICS_POOL.get_or_create('SetEndpoint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteEndpoint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) + class L2NMEmulatedServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service : Service, task_executor : TaskExecutor, **settings @@ -46,6 +64,7 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): elif action == ConfigActionEnum.CONFIGACTION_DELETE: delete_subnode(self.__resolver, self.__config, resource_key) + @metered_subclass_method(METRICS_POOL) def SetEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, 
Exception]]: @@ -80,6 +99,7 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -114,6 +134,7 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -122,6 +143,7 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -130,6 +152,7 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -147,6 +170,7 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] diff --git a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py index 54fb52630c68154865513d3969cdee9a06848c01..f161225192dfe7f9eb0804b9d9bff4e5acba9e21 100644 --- a/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_emulated/L3NMEmulatedServiceHandler.py @@ -14,6 +14,7 @@ import anytree, json, logging from typing import Any, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_length, chk_type @@ -24,6 +25,23 @@ from .ConfigRules import setup_config_rules, teardown_config_rules LOGGER = logging.getLogger(__name__) +HISTOGRAM_BUCKETS = ( + # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF + 0.0010, 0.0025, 0.0050, 0.0075, + 0.0100, 0.0250, 0.0500, 0.0750, + 0.1000, 0.2500, 0.5000, 0.7500, + 1.0000, 2.5000, 5.0000, 7.5000, + 10.0000, 25.000, 50.0000, 75.000, + 100.0, INF +) +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l3nm_emulated'}) +METRICS_POOL.get_or_create('SetEndpoint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteEndpoint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConfig', 
MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) + class L3NMEmulatedServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service : Service, task_executor : TaskExecutor, **settings @@ -46,6 +64,7 @@ class L3NMEmulatedServiceHandler(_ServiceHandler): elif action == ConfigActionEnum.CONFIGACTION_DELETE: delete_subnode(self.__resolver, self.__config, resource_key) + @metered_subclass_method(METRICS_POOL) def SetEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -80,6 +99,7 @@ class L3NMEmulatedServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -114,6 +134,7 @@ class L3NMEmulatedServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -122,6 +143,7 @@ class L3NMEmulatedServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -130,6 +152,7 @@ class L3NMEmulatedServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -147,6 +170,7 @@ class L3NMEmulatedServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] diff --git a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py index bdf6881647ef1a0861a312496c45512c8734afd9..0f5cb6c558c1515b81d011074ecda7e167c47e90 100644 --- a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py @@ -14,6 +14,7 @@ import anytree, json, logging from typing import Any, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_length, chk_type @@ -24,6 +25,23 @@ from .ConfigRules import setup_config_rules, teardown_config_rules LOGGER = logging.getLogger(__name__) +HISTOGRAM_BUCKETS = ( + # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF + 0.0010, 0.0025, 0.0050, 0.0075, + 0.0100, 0.0250, 0.0500, 0.0750, + 0.1000, 0.2500, 0.5000, 0.7500, + 1.0000, 2.5000, 5.0000, 7.5000, + 10.0000, 25.000, 50.0000, 75.000, + 100.0, INF +) +METRICS_POOL = MetricsPool('Service', 'Handler', 
labels={'handler': 'l3nm_openconfig'}) +METRICS_POOL.get_or_create('SetEndpoint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteEndpoint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConstraint', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('SetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) +METRICS_POOL.get_or_create('DeleteConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS) + class L3NMOpenConfigServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service : Service, task_executor : TaskExecutor, **settings @@ -46,6 +64,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): elif action == ConfigActionEnum.CONFIGACTION_DELETE: delete_subnode(self.__resolver, self.__config, resource_key) + @metered_subclass_method(METRICS_POOL) def SetEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -80,6 +99,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -114,6 +134,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -122,6 +143,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -130,6 +152,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -147,6 +170,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] diff --git a/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py index 1ae08bbf6a7b0f6aeedbf9d571dfbc154e22dace..fb54a1bc1db3071e88fd26e935c7779c7c2f19ee 100644 --- a/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py +++ b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py @@ -14,6 +14,7 @@ import anytree, json, logging from typing import Any, Dict, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service from 
common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from common.tools.object_factory.Device import json_device_id @@ -30,6 +31,8 @@ def check_endpoint(endpoint : str, service_uuid : str) -> Tuple[str, str]: raise Exception('Endpoint({:s}) is malformed for Service({:s})'.format(str(endpoint), str(service_uuid))) return endpoint_split +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'microwave'}) + class MicrowaveServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service : Service, task_executor : TaskExecutor, **settings @@ -52,6 +55,7 @@ class MicrowaveServiceHandler(_ServiceHandler): elif action == ConfigActionEnum.CONFIGACTION_DELETE: delete_subnode(self.__resolver, self.__config, resource_key) + @metered_subclass_method(METRICS_POOL) def SetEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -77,7 +81,7 @@ class MicrowaveServiceHandler(_ServiceHandler): device_uuid = endpoints[0][0] device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) - json_config_rule = json_config_rule_set('/service[{:s}]'.format(service_uuid), { + json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), { 'uuid' : service_uuid, 'node_id_src': node_id_src, 'tp_id_src' : tp_id_src, @@ -95,6 +99,7 @@ class MicrowaveServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -106,11 +111,13 @@ class MicrowaveServiceHandler(_ServiceHandler): results = [] try: chk_type('endpoints', endpoints, list) - if len(endpoints) != 2: raise Exception('len(endpoints) != 2') + if len(endpoints) < 1: raise Exception('len(endpoints) < 1') device_uuid = endpoints[0][0] device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) - json_config_rule = json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid}) + json_config_rule = json_config_rule_delete('/services/service[{:s}]'.format(service_uuid), { + 'uuid': service_uuid + }) del device.device_config.config_rules[:] device.device_config.config_rules.append(ConfigRule(**json_config_rule)) self.__task_executor.configure_device(device) @@ -121,6 +128,7 @@ class MicrowaveServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -129,6 +137,7 @@ class MicrowaveServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -137,6 +146,7 @@ class MicrowaveServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -154,6 +164,7 @@ class 
MicrowaveServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] diff --git a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py index f94948129f23c1aff3a3db3dbb4c236ae161e5e1..24371203ad599d7ad9a7f66e5ad96874471be00b 100644 --- a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py +++ b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py @@ -14,6 +14,7 @@ import anytree, json, logging from typing import Any, Dict, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from common.tools.object_factory.Device import json_device_id @@ -24,6 +25,8 @@ from service.service.task_scheduler.TaskExecutor import TaskExecutor LOGGER = logging.getLogger(__name__) +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'tapi_tapi'}) + class TapiServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service : Service, task_executor : TaskExecutor, **settings @@ -46,6 +49,7 @@ class TapiServiceHandler(_ServiceHandler): elif action == ConfigActionEnum.CONFIGACTION_DELETE: delete_subnode(self.__resolver, self.__config, resource_key) + @metered_subclass_method(METRICS_POOL) def SetEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -89,6 +93,7 @@ class TapiServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: @@ -114,6 +119,7 @@ class TapiServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -122,6 +128,7 @@ class TapiServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] @@ -130,6 +137,7 @@ class TapiServiceHandler(_ServiceHandler): LOGGER.warning(msg.format(str(constraints))) return [True for _ in range(len(constraints))] + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] @@ -147,6 +155,7 @@ class TapiServiceHandler(_ServiceHandler): return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: return [] diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index 
757a660590dde1b3fb2eee7090b2329cd45ec8cb..7c96eb665e75a08f2a47fa9d78a0bd9cc37a876e 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -14,8 +14,8 @@ from enum import Enum from typing import TYPE_CHECKING, Any, Dict, Optional, Union +from common.method_wrappers.ServiceExceptions import NotFoundException from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py index beb7e5a0426b7705dbf780d8305a587a3d4fec14..cfafd54e51f73b78d18d13a1c6d9e2c18ac4c944 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from common.method_wrappers.ServiceExceptions import OperationFailedException from common.proto.context_pb2 import ConnectionId -from common.rpc_method_wrapper.ServiceExceptions import OperationFailedException from common.tools.grpc.Tools import grpc_message_to_json_string from service.service.service_handler_api.Tools import check_errors_setendpoint from service.service.task_scheduler.TaskExecutor import TaskExecutor diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py index c04d950a8993166c3bbfab3c083d4f2898dcd3e8..4c8b75b2f365724215a690b97e98198405f4632c 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
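Reviewer note: the import swap just below is one more instance of the commit-wide move from common.rpc_method_wrapper to common.method_wrappers. For the RPC servicers, the migration always follows the same shape; condensed from the hunks above (all identifiers appear verbatim in this diff; the sketch assumes the TeraFlow common package is on the import path and is not a standalone module):

    # Before: each servicer enumerated its RPC methods up front.
    from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
    SERVICE_NAME = 'Slice'
    METHOD_NAMES = ['CreateSlice', 'UpdateSlice', 'DeleteSlice']
    METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)

    class SliceServiceServicerImpl(SliceServiceServicer):
        @safe_and_metered_rpc_method(METRICS, LOGGER)
        def CreateSlice(self, request, context): ...

    # After: a single pool per component/label group; no per-method name list is needed.
    from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
    METRICS_POOL = MetricsPool('Slice', 'RPC')

    class SliceServiceServicerImpl(SliceServiceServicer):
        @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
        def CreateSlice(self, request, context): ...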
+from common.method_wrappers.ServiceExceptions import OperationFailedException from common.proto.context_pb2 import ConnectionId -from common.rpc_method_wrapper.ServiceExceptions import OperationFailedException from common.tools.grpc.Tools import grpc_message_to_json_string from service.service.service_handler_api.Tools import check_errors_deleteendpoint from service.service.task_scheduler.TaskExecutor import TaskExecutor diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index ada7218588391766147a02f9713b540016522aa7..aa41a77ac7b5ce1ec6dabba0f841692ce2f8f42e 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -16,7 +16,7 @@ import grpc, json, logging #, deepdiff from common.proto.context_pb2 import ( Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Slice, SliceId, SliceStatusEnum) from common.proto.slice_pb2_grpc import SliceServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.tools.context_queries.InterDomain import is_multi_domain from common.tools.grpc.ConfigRules import copy_config_rules from common.tools.grpc.Constraints import copy_constraints @@ -29,9 +29,7 @@ from service.client.ServiceClient import ServiceClient LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Slice' -METHOD_NAMES = ['CreateSlice', 'UpdateSlice', 'DeleteSlice'] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Slice', 'RPC') class SliceServiceServicerImpl(SliceServiceServicer): def __init__(self): @@ -158,7 +156,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client.SetSlice(slice_active) return slice_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def CreateSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: #try: # slice_ = context_client.GetSlice(request.slice_id) @@ -168,7 +166,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): #return slice_id return self.create_update(request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def UpdateSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: #slice_id = context_client.SetSlice(request) #if len(request.slice_endpoint_ids) != 2: return slice_id @@ -186,7 +184,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): # raise NotImplementedError('Slice should create local services for single domain slice') return self.create_update(request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DeleteSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: context_client = ContextClient() try: diff --git a/src/tests/tools/load_gen/Constants.py b/src/tests/tools/load_gen/Constants.py new file mode 100644 index 0000000000000000000000000000000000000000..32b457bae849a50ccbe61e1997aec944cb6a2257 --- /dev/null +++ b/src/tests/tools/load_gen/Constants.py @@ -0,0 +1,28 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
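Aside on the new load_gen constants introduced just below: ENDPOINT_COMPATIBILITY pairs an endpoint type with the type it may be wired to, so the request generator can pick a valid far-end endpoint for a chosen source. A hypothetical lookup helper (not part of this commit) showing the intended use:

    from tests.tools.load_gen.Constants import ENDPOINT_COMPATIBILITY

    def compatible_endpoint_type(endpoint_type : str):
        # e.g. 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT' -> 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT';
        # returns None for endpoint types with no registered compatibility constraint.
        return ENDPOINT_COMPATIBILITY.get(endpoint_type)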
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum + +class RequestType(Enum): + SERVICE_L2NM = 'svc-l2nm' + SERVICE_L3NM = 'svc-l3nm' + SERVICE_TAPI = 'svc-tapi' + SERVICE_MW = 'svc-mw' + SLICE_L2NM = 'slc-l2nm' + SLICE_L3NM = 'slc-l3nm' + +ENDPOINT_COMPATIBILITY = { + 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:INPUT': 'PHOTONIC_MEDIA:FLEX:G_6_25GHZ:OUTPUT', + 'PHOTONIC_MEDIA:DWDM:G_50GHZ:INPUT' : 'PHOTONIC_MEDIA:DWDM:G_50GHZ:OUTPUT', +} diff --git a/src/tests/tools/load_gen/DltTools.py b/src/tests/tools/load_gen/DltTools.py new file mode 100644 index 0000000000000000000000000000000000000000..34d195ad701c3af9bd0d32acb786013151ec01f8 --- /dev/null +++ b/src/tests/tools/load_gen/DltTools.py @@ -0,0 +1,123 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, queue +from typing import Optional, Set, Tuple +from common.proto.context_pb2 import DeviceId, LinkId, ServiceId, SliceId, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltConnectorClient import DltConnectorClient + +def explore_entities_to_record( + slice_id : Optional[SliceId] = None, service_id : Optional[ServiceId] = None +) -> Tuple[Set[str], Set[str], Set[str]]: + + context_client = ContextClient() + + slices_to_record : Set[str] = set() + services_to_record : Set[str] = set() + devices_to_record : Set[str] = set() + + slices_to_explore = queue.Queue() + services_to_explore = queue.Queue() + if slice_id is not None: slices_to_explore.put(slice_id) + if service_id is not None: services_to_explore.put(service_id) + + while not slices_to_explore.empty(): + slice_id = slices_to_explore.get() + slices_to_record.add(grpc_message_to_json_string(slice_id)) + + slice_ = context_client.GetSlice(slice_id) + + for endpoint_id in slice_.slice_endpoint_ids: + devices_to_record.add(grpc_message_to_json_string(endpoint_id.device_id)) + for subslice_id in slice_.slice_subslice_ids: + slices_to_explore.put(subslice_id) + for service_id in slice_.slice_service_ids: + services_to_explore.put(service_id) + + while not services_to_explore.empty(): + service_id = services_to_explore.get() + services_to_record.add(grpc_message_to_json_string(service_id)) + + service = context_client.GetService(service_id) + + for endpoint_id in service.service_endpoint_ids: + devices_to_record.add(grpc_message_to_json_string(endpoint_id.device_id)) + + 
connections = context_client.ListConnections(service_id) + for connection in connections.connections: + for endpoint_id in connection.path_hops_endpoint_ids: + devices_to_record.add(grpc_message_to_json_string(endpoint_id.device_id)) + for service_id in connection.sub_service_ids: + services_to_explore.put(service_id) + + return slices_to_record, services_to_record, devices_to_record + +def record_device_to_dlt( + dlt_connector_client : DltConnectorClient, domain_id : TopologyId, device_id : DeviceId, delete : bool = False +) -> None: + dlt_device_id = DltDeviceId() + dlt_device_id.topology_id.CopyFrom(domain_id) # pylint: disable=no-member + dlt_device_id.device_id.CopyFrom(device_id) # pylint: disable=no-member + dlt_device_id.delete = delete + dlt_connector_client.RecordDevice(dlt_device_id) + +def record_link_to_dlt( + dlt_connector_client : DltConnectorClient, domain_id : TopologyId, link_id : LinkId, delete : bool = False +) -> None: + dlt_link_id = DltLinkId() + dlt_link_id.topology_id.CopyFrom(domain_id) # pylint: disable=no-member + dlt_link_id.link_id.CopyFrom(link_id) # pylint: disable=no-member + dlt_link_id.delete = delete + dlt_connector_client.RecordLink(dlt_link_id) + +def record_service_to_dlt( + dlt_connector_client : DltConnectorClient, domain_id : TopologyId, service_id : ServiceId, delete : bool = False +) -> None: + dlt_service_id = DltServiceId() + dlt_service_id.topology_id.CopyFrom(domain_id) # pylint: disable=no-member + dlt_service_id.service_id.CopyFrom(service_id) # pylint: disable=no-member + dlt_service_id.delete = delete + dlt_connector_client.RecordService(dlt_service_id) + +def record_slice_to_dlt( + dlt_connector_client : DltConnectorClient, domain_id : TopologyId, slice_id : SliceId, delete : bool = False +) -> None: + dlt_slice_id = DltSliceId() + dlt_slice_id.topology_id.CopyFrom(domain_id) # pylint: disable=no-member + dlt_slice_id.slice_id.CopyFrom(slice_id) # pylint: disable=no-member + dlt_slice_id.delete = delete + dlt_connector_client.RecordSlice(dlt_slice_id) + +def record_entities( + slices_to_record : Set[str] = set(), services_to_record : Set[str] = set(), devices_to_record : Set[str] = set(), + delete : bool = False +) -> None: + dlt_connector_client = DltConnectorClient() + dlt_domain_id = TopologyId(**json_topology_id('dlt-perf-eval')) + + for str_device_id in devices_to_record: + device_id = DeviceId(**(json.loads(str_device_id))) + record_device_to_dlt(dlt_connector_client, dlt_domain_id, device_id, delete=delete) + + for str_service_id in services_to_record: + service_id = ServiceId(**(json.loads(str_service_id))) + record_service_to_dlt(dlt_connector_client, dlt_domain_id, service_id, delete=delete) + + for str_slice_id in slices_to_record: + slice_id = SliceId(**(json.loads(str_slice_id))) + record_slice_to_dlt(dlt_connector_client, dlt_domain_id, slice_id, delete=delete) diff --git a/src/tests/tools/load_gen/Parameters.py b/src/tests/tools/load_gen/Parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..c74d18248c6000cd6da18d5c7e0e55ef2be41730 --- /dev/null +++ b/src/tests/tools/load_gen/Parameters.py @@ -0,0 +1,68 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
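The Parameters class added below derives whichever of the three load parameters is omitted, using the classic teletraffic relation offered_load = holding_time / inter_arrival_time, and raises unless exactly two of the three are given. A quick worked instance, assuming the module paths introduced by this commit:

    from tests.tools.load_gen.Constants import RequestType
    from tests.tools.load_gen.Parameters import Parameters

    params = Parameters(
        num_requests       = 100,
        request_types      = [RequestType.SERVICE_L3NM.value],
        holding_time       = 10.0,   # seconds a request stays installed
        inter_arrival_time = 2.0,    # seconds between consecutive arrivals
    )
    assert params.offered_load == 5.0   # derived: 10.0 / 2.0 Erlang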
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional + +class Parameters: + def __init__( + self, num_requests : int, request_types : List[str], offered_load : Optional[float] = None, + inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None, + dry_mode : bool = False, record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None + ) -> None: + self._num_requests = num_requests + self._request_types = request_types + self._offered_load = offered_load + self._inter_arrival_time = inter_arrival_time + self._holding_time = holding_time + self._dry_mode = dry_mode + self._record_to_dlt = record_to_dlt + self._dlt_domain_id = dlt_domain_id + + if self._offered_load is None and self._holding_time is not None and self._inter_arrival_time is not None: + self._offered_load = self._holding_time / self._inter_arrival_time + elif self._offered_load is not None and self._holding_time is not None and self._inter_arrival_time is None: + self._inter_arrival_time = self._holding_time / self._offered_load + elif self._offered_load is not None and self._holding_time is None and self._inter_arrival_time is not None: + self._holding_time = self._offered_load * self._inter_arrival_time + else: + MSG = 'Exactly two of offered_load({:s}), inter_arrival_time({:s}), holding_time({:s}) must be specified.' + raise Exception(MSG.format(str(self._offered_load), str(self._inter_arrival_time), str(self._holding_time))) + + if self._record_to_dlt and self._dlt_domain_id is None: + MSG = 'Parameter dlt_domain_id({:s}) must be specified with record_to_dlt({:s}).' + raise Exception(MSG.format(str(self._dlt_domain_id), str(self._record_to_dlt))) + + @property + def num_requests(self): return self._num_requests + + @property + def request_types(self): return self._request_types + + @property + def offered_load(self): return self._offered_load + + @property + def inter_arrival_time(self): return self._inter_arrival_time + + @property + def holding_time(self): return self._holding_time + + @property + def dry_mode(self): return self._dry_mode + + @property + def record_to_dlt(self): return self._record_to_dlt + + @property + def dlt_domain_id(self): return self._dlt_domain_id diff --git a/src/tests/tools/load_gen/RequestGenerator.py b/src/tests/tools/load_gen/RequestGenerator.py new file mode 100644 index 0000000000000000000000000000000000000000..d38291d380d044fa3b91a1b653ea47f6e917fe16 --- /dev/null +++ b/src/tests/tools/load_gen/RequestGenerator.py @@ -0,0 +1,409 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
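
Note: `Parameters` ties the three traffic knobs together through the classic offered-load relation, `offered_load = holding_time / inter_arrival_time`; given any two, the third is derived. A small worked example with the same values used by `__main__.py` below (a sketch, not part of the patch):

```python
# Offered load in Erlangs: mean number of requests simultaneously held.
# Values mirror __main__.py below; any one value follows from the other two.
holding_time       = 10.0                         # mean seconds a request stays up
offered_load       = 50.0                         # target load in Erlangs
inter_arrival_time = holding_time / offered_load  # -> 0.2 s between arrivals

assert abs(offered_load - holding_time / inter_arrival_time) < 1e-9
```
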
+ +import logging, json, random, threading +from typing import Dict, Optional, Set, Tuple +from common.proto.context_pb2 import Empty, TopologyId +from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.ConfigRule import json_config_rule_set +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.EndPoint import json_endpoint_id +from common.tools.object_factory.Service import ( + json_service_l2nm_planned, json_service_l3nm_planned, json_service_tapi_planned) +from common.tools.object_factory.Slice import json_slice +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltConnectorClient import DltConnectorClient +from tests.tools.load_gen.DltTools import record_device_to_dlt, record_link_to_dlt +from .Constants import ENDPOINT_COMPATIBILITY, RequestType +from .Parameters import Parameters + +LOGGER = logging.getLogger(__name__) + +class RequestGenerator: + def __init__(self, parameters : Parameters) -> None: + self._parameters = parameters + self._lock = threading.Lock() + self._num_requests = 0 + self._available_device_endpoints : Dict[str, Set[str]] = dict() + self._used_device_endpoints : Dict[str, Dict[str, str]] = dict() + self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict() + self._endpoint_types_to_ids : Dict[str, Set[Tuple[str, str]]] = dict() + + def initialize(self) -> None: + with self._lock: + self._available_device_endpoints.clear() + self._used_device_endpoints.clear() + + context_client = ContextClient() + dlt_connector_client = DltConnectorClient() + + if self._parameters.record_to_dlt: + dlt_domain_id = TopologyId(**json_topology_id('dlt-perf-eval')) + + devices = context_client.ListDevices(Empty()) + for device in devices.devices: + device_uuid = device.device_id.device_uuid.uuid + _endpoints = self._available_device_endpoints.setdefault(device_uuid, set()) + for endpoint in device.device_endpoints: + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoint_type = endpoint.endpoint_type + _endpoints.add(endpoint_uuid) + self._endpoint_ids_to_types.setdefault((device_uuid, endpoint_uuid), endpoint_type) + self._endpoint_types_to_ids.setdefault(endpoint_type, set()).add((device_uuid, endpoint_uuid)) + + if self._parameters.record_to_dlt: + record_device_to_dlt(dlt_connector_client, dlt_domain_id, device.device_id) + + links = context_client.ListLinks(Empty()) + for link in links.links: + for endpoint_id in link.link_endpoint_ids: + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + _endpoints = self._available_device_endpoints.get(device_uuid, set()) + _endpoints.discard(endpoint_uuid) + if len(_endpoints) == 0: self._available_device_endpoints.pop(device_uuid, None) + + endpoint_type = self._endpoint_ids_to_types.pop((device_uuid, endpoint_uuid), None) + if endpoint_type is None: continue + + if endpoint_type not in self._endpoint_types_to_ids: continue + endpoints_for_type = self._endpoint_types_to_ids[endpoint_type] + endpoint_key = (device_uuid, endpoint_uuid) + if endpoint_key not in endpoints_for_type: continue + endpoints_for_type.discard(endpoint_key) + + if self._parameters.record_to_dlt: + record_link_to_dlt(dlt_connector_client, dlt_domain_id, link.link_id) + + @property + def num_requests_generated(self): return self._num_requests + + def dump_state(self) -> None: + with self._lock: + _endpoints = { 
+                device_uuid:[endpoint_uuid for endpoint_uuid in endpoint_uuids]
+                for device_uuid,endpoint_uuids in self._available_device_endpoints.items()
+            }
+            LOGGER.info('[dump_state] available_device_endpoints = {:s}'.format(json.dumps(_endpoints)))
+            LOGGER.info('[dump_state] used_device_endpoints = {:s}'.format(json.dumps(self._used_device_endpoints)))
+
+    def _use_device_endpoint(
+        self, service_uuid : str, request_type : RequestType, endpoint_types : Optional[Set[str]] = None,
+        exclude_device_uuids : Set[str] = set(), exclude_endpoint_uuids : Set[Tuple[str, str]] = set(),
+    ) -> Optional[Tuple[str, str]]:
+        with self._lock:
+            compatible_endpoints : Set[Tuple[str, str]] = set()
+            eligible_device_endpoints : Dict[str, Set[str]] = {}
+
+            if endpoint_types is None:
+                # allow all
+                eligible_device_endpoints : Dict[str, Set[str]] = {
+                    device_uuid:[
+                        endpoint_uuid for endpoint_uuid in device_endpoint_uuids
+                        if (len(exclude_endpoint_uuids) == 0) or \
+                            ((device_uuid,endpoint_uuid) not in exclude_endpoint_uuids)
+                    ]
+                    for device_uuid,device_endpoint_uuids in self._available_device_endpoints.items()
+                    if (device_uuid not in exclude_device_uuids) and \
+                        (len(device_endpoint_uuids) > 0)
+                }
+            else:
+                # allow only compatible endpoints
+                for endpoint_type in endpoint_types:
+                    if endpoint_type not in self._endpoint_types_to_ids: continue
+                    compatible_endpoints.update(self._endpoint_types_to_ids[endpoint_type])
+
+                for device_uuid,device_endpoint_uuids in self._available_device_endpoints.items():
+                    if device_uuid in exclude_device_uuids or len(device_endpoint_uuids) == 0: continue
+                    for endpoint_uuid in device_endpoint_uuids:
+                        endpoint_key = (device_uuid,endpoint_uuid)
+                        if endpoint_key in exclude_endpoint_uuids: continue
+                        if endpoint_key not in compatible_endpoints: continue
+                        eligible_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid)
+
+            if len(eligible_device_endpoints) == 0:
+                LOGGER.warning(' '.join([
+                    '>> No endpoint is available:',
+                    'endpoint_types={:s}'.format(str(endpoint_types)),
+                    'exclude_device_uuids={:s}'.format(str(exclude_device_uuids)),
+                    'self._endpoint_types_to_ids={:s}'.format(str(self._endpoint_types_to_ids)),
+                    'self._available_device_endpoints={:s}'.format(str(self._available_device_endpoints)),
+                    'compatible_endpoints={:s}'.format(str(compatible_endpoints)),
+                ]))
+                return None
+
+            device_uuid = random.choice(list(eligible_device_endpoints.keys()))
+            device_endpoint_uuids = eligible_device_endpoints.get(device_uuid)
+            endpoint_uuid = random.choice(list(device_endpoint_uuids))
+            if request_type not in {RequestType.SERVICE_MW}:
+                # reserve the resources
+                self._available_device_endpoints.setdefault(device_uuid, set()).discard(endpoint_uuid)
+                self._used_device_endpoints.setdefault(device_uuid, dict())[endpoint_uuid] = service_uuid
+            return device_uuid, endpoint_uuid
+
+    def _release_device_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None:
+        with self._lock:
+            self._used_device_endpoints.setdefault(device_uuid, dict()).pop(endpoint_uuid, None)
+            self._available_device_endpoints.setdefault(device_uuid, set()).add(endpoint_uuid)
+
+    def compose_request(self) -> Optional[Dict]:
+        with self._lock:
+            self._num_requests += 1
+            num_request = self._num_requests
+
+        #request_uuid = str(uuid.uuid4())
+        request_uuid = 'svc_{:d}'.format(num_request)
+
+        # choose request type
+        request_type = random.choice(self._parameters.request_types)
+
+        if request_type in {
+            RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, RequestType.SERVICE_TAPI,
RequestType.SERVICE_MW
+        }:
+            return self._compose_service(num_request, request_uuid, request_type)
+        elif request_type in {RequestType.SLICE_L2NM, RequestType.SLICE_L3NM}:
+            return self._compose_slice(num_request, request_uuid, request_type)
+
+    def _compose_service(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
+        # choose source endpoint
+        src_endpoint_types = set(ENDPOINT_COMPATIBILITY.keys()) if request_type in {RequestType.SERVICE_TAPI} else None
+        src = self._use_device_endpoint(request_uuid, request_type, endpoint_types=src_endpoint_types)
+        if src is None:
+            LOGGER.warning('>> No source endpoint is available')
+            return None
+        src_device_uuid,src_endpoint_uuid = src
+
+        # identify compatible destination endpoint types
+        src_endpoint_type = self._endpoint_ids_to_types.get((src_device_uuid,src_endpoint_uuid))
+        dst_endpoint_type = ENDPOINT_COMPATIBILITY.get(src_endpoint_type)
+        dst_endpoint_types = {dst_endpoint_type} if request_type in {RequestType.SERVICE_TAPI} else None
+
+        # identify excluded destination devices
+        exclude_device_uuids = set() if request_type in {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} else {src_device_uuid}
+
+        # choose feasible destination endpoint
+        dst = self._use_device_endpoint(
+            request_uuid, request_type, endpoint_types=dst_endpoint_types, exclude_device_uuids=exclude_device_uuids,
+            exclude_endpoint_uuids={src})
+
+        # if destination endpoint not found, release source, and terminate current service generation
+        if dst is None:
+            LOGGER.warning('>> No destination endpoint is available')
+            self._release_device_endpoint(src_device_uuid, src_endpoint_uuid)
+            return None
+
+        # compose endpoints
+        dst_device_uuid,dst_endpoint_uuid = dst
+        endpoint_ids = [
+            json_endpoint_id(json_device_id(src_device_uuid), src_endpoint_uuid),
+            json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
+        ]
+
+        if request_type == RequestType.SERVICE_L2NM:
+            constraints = [
+                json_constraint_custom('bandwidth[gbps]', '10.0'),
+                json_constraint_custom('latency[ms]', '20.0'),
+            ]
+            vlan_id = num_request % 1000
+            circuit_id = '{:03d}'.format(vlan_id)
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_uuid.replace('R', '')))
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'mtu': 1512
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                    'router_id': src_router_id,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id': vlan_id,
+                    'remote_router': dst_router_id,
+                    'circuit_id': circuit_id,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                    'router_id': dst_router_id,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id': vlan_id,
+                    'remote_router': src_router_id,
+                    'circuit_id': circuit_id,
+                }),
+            ]
+            return json_service_l2nm_planned(
+                request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
+
+        elif request_type == RequestType.SERVICE_L3NM:
+            constraints = [
+                json_constraint_custom('bandwidth[gbps]', '10.0'),
+                json_constraint_custom('latency[ms]', '20.0'),
+            ]
+            vlan_id = num_request % 1000
+            bgp_as = 60000 + (num_request % 10000)
+            bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
+            route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_uuid.replace('R', '')))
+            src_address_ip = '.'.join([src_device_uuid.replace('R', ''), '0'] + src_endpoint_uuid.split('/'))
+            dst_address_ip = '.'.join([dst_device_uuid.replace('R', ''), '0'] + dst_endpoint_uuid.split('/'))
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'mtu'             : 1512,
+                    'bgp_as'          : bgp_as,
+                    'bgp_route_target': bgp_route_target,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                    'router_id'          : src_router_id,
+                    'route_distinguisher': route_distinguisher,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id'            : vlan_id,
+                    'address_ip'         : src_address_ip,
+                    'address_prefix'     : 16,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                    'router_id'          : dst_router_id,
+                    'route_distinguisher': route_distinguisher,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id'            : vlan_id,
+                    'address_ip'         : dst_address_ip,
+                    'address_prefix'     : 16,
+                }),
+            ]
+            return json_service_l3nm_planned(
+                request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
+
+        elif request_type == RequestType.SERVICE_TAPI:
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'capacity_value'  : 50.0,
+                    'capacity_unit'   : 'GHz',
+                    'layer_proto_name': 'PHOTONIC_MEDIA',
+                    'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
+                    'direction'       : 'UNIDIRECTIONAL',
+                }),
+            ]
+            return json_service_tapi_planned(
+                request_uuid, endpoint_ids=endpoint_ids, constraints=[], config_rules=config_rules)
+
+        elif request_type == RequestType.SERVICE_MW:
+            vlan_id = 1000 + num_request % 1000
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'vlan_id': vlan_id,
+                }),
+            ]
+            return json_service_l2nm_planned(
+                request_uuid, endpoint_ids=endpoint_ids, constraints=[], config_rules=config_rules)
+
+    def _compose_slice(self, num_request : int, request_uuid : str, request_type : str) -> Optional[Dict]:
+        # choose source endpoint
+        src = self._use_device_endpoint(request_uuid, request_type)
+        if src is None:
+            LOGGER.warning('>> No source endpoint is available')
+            return None
+        src_device_uuid,src_endpoint_uuid = src
+
+        # identify excluded destination devices
+        exclude_device_uuids = set() if request_type in {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} else {src_device_uuid}
+
+        # choose feasible destination endpoint
+        dst = self._use_device_endpoint(request_uuid, request_type, exclude_device_uuids=exclude_device_uuids)
+
+        # if destination endpoint not found, release source, and terminate current service generation
+        if dst is None:
+            LOGGER.warning('>> No destination endpoint is available')
+            self._release_device_endpoint(src_device_uuid, src_endpoint_uuid)
+            return None
+
+        # compose endpoints
+        dst_device_uuid,dst_endpoint_uuid = dst
+        endpoint_ids = [
+            json_endpoint_id(json_device_id(src_device_uuid), src_endpoint_uuid),
+            json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid),
+        ]
+        constraints = [
+            json_constraint_custom('bandwidth[gbps]', '10.0'),
+            json_constraint_custom('latency[ms]', '20.0'),
+        ]
+
+        if request_type == RequestType.SLICE_L2NM:
+            vlan_id = num_request % 1000
+            circuit_id = '{:03d}'.format(vlan_id)
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_uuid.replace('R', '')))
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'mtu': 1512
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                    'router_id': src_router_id,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id': vlan_id,
+                    'remote_router': dst_router_id,
+                    'circuit_id': circuit_id,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                    'router_id': dst_router_id,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id': vlan_id,
+                    'remote_router': src_router_id,
+                    'circuit_id': circuit_id,
+                }),
+            ]
+
+        elif request_type == RequestType.SLICE_L3NM:
+            vlan_id = num_request % 1000
+            bgp_as = 60000 + (num_request % 10000)
+            bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
+            route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_uuid.replace('R', '')))
+            src_address_ip = '.'.join([src_device_uuid.replace('R', ''), '0'] + src_endpoint_uuid.split('/'))
+            dst_address_ip = '.'.join([dst_device_uuid.replace('R', ''), '0'] + dst_endpoint_uuid.split('/'))
+            config_rules = [
+                json_config_rule_set('/settings', {
+                    'mtu'             : 1512,
+                    'bgp_as'          : bgp_as,
+                    'bgp_route_target': bgp_route_target,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                    'router_id'          : src_router_id,
+                    'route_distinguisher': route_distinguisher,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id'            : vlan_id,
+                    'address_ip'         : src_address_ip,
+                    'address_prefix'     : 16,
+                }),
+                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                    'router_id'          : dst_router_id,
+                    'route_distinguisher': route_distinguisher,
+                    'sub_interface_index': vlan_id,
+                    'vlan_id'            : vlan_id,
+                    'address_ip'         : dst_address_ip,
+                    'address_prefix'     : 16,
+                }),
+            ]
+
+        return json_slice(
+            request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
+
+    def release_request(self, json_request : Dict) -> None:
+        if 'service_id' in json_request:
+            for endpoint_id in json_request['service_endpoint_ids']:
+                device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+                endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+                self._release_device_endpoint(device_uuid, endpoint_uuid)
+        elif 'slice_id' in json_request:
+            for endpoint_id in json_request['slice_endpoint_ids']:
+                device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
+                endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
+                self._release_device_endpoint(device_uuid, endpoint_uuid)
diff --git a/src/tests/tools/load_gen/RequestScheduler.py b/src/tests/tools/load_gen/RequestScheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..eafb95c30032e69ab4f2f7874656b11db4f6817f
--- /dev/null
+++ b/src/tests/tools/load_gen/RequestScheduler.py
@@ -0,0 +1,192 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
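
Note: `RequestGenerator` above reserves endpoints under a lock: a source and a compatible destination are drawn at random from the per-device availability sets, moved into `_used_device_endpoints` while the request is alive, and returned by `_release_device_endpoint` at teardown (`SERVICE_MW` requests skip the reservation). A minimal standalone sketch of that bookkeeping (simplified names and toy data, not the actual class):

```python
# Sketch of the reserve/release bookkeeping (hypothetical toy topology).
import random

available = {'R1': {'1/1', '1/2'}, 'R2': {'1/1'}}  # device -> free endpoint uuids
used = {}                                          # device -> {endpoint: service}

def reserve(service_uuid):
    eligible = [dev for dev, eps in available.items() if len(eps) > 0]
    if len(eligible) == 0: return None             # no endpoint available
    device = random.choice(eligible)
    endpoint = random.choice(list(available[device]))
    available[device].discard(endpoint)            # no longer free
    used.setdefault(device, {})[endpoint] = service_uuid
    return device, endpoint

def release(device, endpoint):
    used.setdefault(device, {}).pop(endpoint, None)
    available.setdefault(device, set()).add(endpoint)

src = reserve('svc_1')
if src is not None: release(*src)
```
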
+ +import copy, logging, pytz, random +from apscheduler.executors.pool import ThreadPoolExecutor +from apscheduler.jobstores.memory import MemoryJobStore +from apscheduler.schedulers.blocking import BlockingScheduler +from datetime import datetime, timedelta +from typing import Dict, Optional +from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId +from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient +from .DltTools import explore_entities_to_record, record_entities +from .Parameters import Parameters +from .RequestGenerator import RequestGenerator + +logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING) +logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) + +LOGGER = logging.getLogger(__name__) + +class RequestScheduler: + def __init__(self, parameters : Parameters, generator : RequestGenerator) -> None: + self._scheduler = BlockingScheduler() + self._scheduler.configure( + jobstores = {'default': MemoryJobStore()}, + executors = {'default': ThreadPoolExecutor(max_workers=10)}, + job_defaults = { + 'coalesce': False, + 'max_instances': 100, + 'misfire_grace_time': 120, + }, + timezone=pytz.utc) + self._parameters = parameters + self._generator = generator + + def _schedule_request_setup(self) -> None: + if self._generator.num_requests_generated >= self._parameters.num_requests: + LOGGER.info('Generation Done!') + #self._scheduler.shutdown() + return + iat = random.expovariate(1.0 / self._parameters.inter_arrival_time) + run_date = datetime.utcnow() + timedelta(seconds=iat) + self._scheduler.add_job( + self._request_setup, trigger='date', run_date=run_date, timezone=pytz.utc) + + def _schedule_request_teardown(self, request : Dict) -> None: + ht = random.expovariate(1.0 / self._parameters.holding_time) + run_date = datetime.utcnow() + timedelta(seconds=ht) + self._scheduler.add_job( + self._request_teardown, args=(request,), trigger='date', run_date=run_date, timezone=pytz.utc) + + def start(self): + self._schedule_request_setup() + self._scheduler.start() + + def _request_setup(self) -> None: + self._schedule_request_setup() + + request = self._generator.compose_request() + if request is None: + LOGGER.warning('No resources available to compose new request') + return + + if 'service_id' in request: + service_uuid = request['service_id']['service_uuid']['uuid'] + src_device_uuid = request['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid'] + src_endpoint_uuid = request['service_endpoint_ids'][0]['endpoint_uuid']['uuid'] + dst_device_uuid = request['service_endpoint_ids'][1]['device_id']['device_uuid']['uuid'] + dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid'] + LOGGER.info('Setup Service: uuid=%s src=%s:%s dst=%s:%s', + service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid) + self._create_update(service=request) + + elif 'slice_id' in request: + slice_uuid = request['slice_id']['slice_uuid']['uuid'] + src_device_uuid = request['slice_endpoint_ids'][0]['device_id']['device_uuid']['uuid'] + src_endpoint_uuid = request['slice_endpoint_ids'][0]['endpoint_uuid']['uuid'] + dst_device_uuid = request['slice_endpoint_ids'][1]['device_id']['device_uuid']['uuid'] + dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid'] + LOGGER.info('Setup Slice: uuid=%s src=%s:%s dst=%s:%s', + slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid) + self._create_update(slice_=request) + 
+ self._schedule_request_teardown(request) + + def _request_teardown(self, request : Dict) -> None: + if 'service_id' in request: + service_uuid = request['service_id']['service_uuid']['uuid'] + src_device_uuid = request['service_endpoint_ids'][0]['device_id']['device_uuid']['uuid'] + src_endpoint_uuid = request['service_endpoint_ids'][0]['endpoint_uuid']['uuid'] + dst_device_uuid = request['service_endpoint_ids'][1]['device_id']['device_uuid']['uuid'] + dst_endpoint_uuid = request['service_endpoint_ids'][1]['endpoint_uuid']['uuid'] + LOGGER.info('Teardown Service: uuid=%s src=%s:%s dst=%s:%s', + service_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid) + self._delete(service_id=ServiceId(**(request['service_id']))) + + elif 'slice_id' in request: + slice_uuid = request['slice_id']['slice_uuid']['uuid'] + src_device_uuid = request['slice_endpoint_ids'][0]['device_id']['device_uuid']['uuid'] + src_endpoint_uuid = request['slice_endpoint_ids'][0]['endpoint_uuid']['uuid'] + dst_device_uuid = request['slice_endpoint_ids'][1]['device_id']['device_uuid']['uuid'] + dst_endpoint_uuid = request['slice_endpoint_ids'][1]['endpoint_uuid']['uuid'] + LOGGER.info('Teardown Slice: uuid=%s src=%s:%s dst=%s:%s', + slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid) + self._delete(slice_id=SliceId(**(request['slice_id']))) + + self._generator.release_request(request) + + def _create_update(self, service : Optional[Dict] = None, slice_ : Optional[Dict] = None) -> None: + if self._parameters.dry_mode: return + + service_id = None + if service is not None: + service_add = copy.deepcopy(service) + service_add['service_endpoint_ids'] = [] + service_add['service_constraints'] = [] + service_add['service_config'] = {'config_rules': []} + + service_client = ServiceClient() + service_id = service_client.CreateService(Service(**service_add)) + service_client.close() + + slice_id = None + if slice_ is not None: + slice_add = copy.deepcopy(slice_) + slice_add['slice_endpoint_ids'] = [] + slice_add['slice_constraints'] = [] + slice_add['slice_config'] = {'config_rules': []} + + slice_client = SliceClient() + slice_id = slice_client.CreateSlice(Slice(**slice_add)) + slice_client.close() + + if self._parameters.record_to_dlt: + entities_to_record = explore_entities_to_record(slice_id=slice_id, service_id=service_id) + slices_to_record, services_to_record, devices_to_record = entities_to_record + record_entities( + slices_to_record=slices_to_record, services_to_record=services_to_record, + devices_to_record=devices_to_record, delete=False) + + service_id = None + if service is not None: + service_client = ServiceClient() + service_id = service_client.UpdateService(Service(**service)) + service_client.close() + + slice_id = None + if slice_ is not None: + slice_client = SliceClient() + slice_id = slice_client.UpdateSlice(Slice(**slice_)) + slice_client.close() + + if self._parameters.record_to_dlt: + entities_to_record = explore_entities_to_record(slice_id=slice_id, service_id=service_id) + slices_to_record, services_to_record, devices_to_record = entities_to_record + record_entities( + slices_to_record=slices_to_record, services_to_record=services_to_record, + devices_to_record=devices_to_record, delete=False) + + def _delete(self, service_id : Optional[ServiceId] = None, slice_id : Optional[SliceId] = None) -> None: + if self._parameters.dry_mode: return + + if self._parameters.record_to_dlt: + entities_to_record = explore_entities_to_record(slice_id=slice_id, 
service_id=service_id) + slices_to_record, services_to_record, devices_to_record = entities_to_record + + if slice_id is not None: + slice_client = SliceClient() + slice_client.DeleteSlice(slice_id) + slice_client.close() + + if service_id is not None: + service_client = ServiceClient() + service_client.DeleteService(service_id) + service_client.close() + + if self._parameters.record_to_dlt: + record_entities( + slices_to_record=slices_to_record, services_to_record=services_to_record, + devices_to_record=devices_to_record, delete=True) diff --git a/src/tests/tools/load_gen/__init__.py b/src/tests/tools/load_gen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/tools/load_gen/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/load_gen/__main__.py b/src/tests/tools/load_gen/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5ea2b6949d1b6dd50d0a40407c6740bf266dd3 --- /dev/null +++ b/src/tests/tools/load_gen/__main__.py @@ -0,0 +1,55 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
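
Note: `RequestScheduler` above models a Poisson arrival process: each setup schedules the next one after an exponentially distributed inter-arrival time, and each established request is torn down after an exponentially distributed holding time. The timing logic in isolation (a sketch; the parameter values match the defaults in `__main__.py` below):

```python
# random.expovariate takes the rate (1 / mean), so the draws below have
# means INTER_ARRIVAL_TIME and HOLDING_TIME respectively.
import random
from datetime import datetime, timedelta

INTER_ARRIVAL_TIME = 0.2    # mean seconds between request setups
HOLDING_TIME       = 10.0   # mean seconds a request stays established

iat = random.expovariate(1.0 / INTER_ARRIVAL_TIME)
ht  = random.expovariate(1.0 / HOLDING_TIME)
next_setup_at = datetime.utcnow() + timedelta(seconds=iat)
teardown_at   = datetime.utcnow() + timedelta(seconds=ht)
print(next_setup_at, teardown_at)
```
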
+
+import logging, sys
+from .Constants import RequestType
+from .Parameters import Parameters
+from .RequestGenerator import RequestGenerator
+from .RequestScheduler import RequestScheduler
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+def main():
+    LOGGER.info('Starting...')
+    parameters = Parameters(
+        num_requests  = 100,
+        request_types = [
+            RequestType.SERVICE_L2NM,
+            RequestType.SERVICE_L3NM,
+            #RequestType.SERVICE_MW,
+            #RequestType.SERVICE_TAPI,
+            RequestType.SLICE_L2NM,
+            RequestType.SLICE_L3NM,
+        ],
+        offered_load  = 50,
+        holding_time  = 10,
+        dry_mode      = False,           # in dry mode, no request is sent to TeraFlowSDN
+        record_to_dlt = False,           # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT
+        dlt_domain_id = 'dlt-perf-eval', # domain used to upload entities; ignored when record_to_dlt = False
+    )
+
+    LOGGER.info('Initializing Generator...')
+    generator = RequestGenerator(parameters)
+    generator.initialize()
+
+    LOGGER.info('Running Scheduler...')
+    scheduler = RequestScheduler(parameters, generator)
+    scheduler.start()
+
+    LOGGER.info('Done!')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/tests/tools/load_gen/deploy_specs.sh b/src/tests/tools/load_gen/deploy_specs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a688f1c0ad920bab2fb5157dce72225671ed837e
--- /dev/null
+++ b/src/tests/tools/load_gen/deploy_specs.sh
@@ -0,0 +1,26 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+# Supported components are:
+#     context device automation policy service compute monitoring webui
+#     interdomain slice pathcomp dlt
+#     dbscanserving opticalattackmitigator opticalattackdetector
+#     l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
+export TFS_COMPONENTS="context device pathcomp service slice webui" # automation monitoring compute dlt
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicemonitors.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# If not already set, disable skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+export TFS_SKIP_BUILD="NO" #${TFS_SKIP_BUILD:-"YES"} diff --git a/src/tests/tools/load_gen/descriptors.json b/src/tests/tools/load_gen/descriptors.json new file mode 100644 index 0000000000000000000000000000000000000000..5fb0c086749cab3343277c28a902b3db48651320 --- /dev/null +++ b/src/tests/tools/load_gen/descriptors.json @@ -0,0 +1,229 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_uuid": {"uuid": "admin"} + }, + "device_ids": [ + {"device_uuid": {"uuid": "R1"}}, + {"device_uuid": {"uuid": "R2"}}, + {"device_uuid": {"uuid": "R3"}}, + {"device_uuid": {"uuid": "R4"}}, + {"device_uuid": {"uuid": "R5"}}, + {"device_uuid": {"uuid": "R6"}}, + {"device_uuid": {"uuid": "R7"}} + ], + "link_ids": [ + {"link_uuid": {"uuid": "R1==R2"}}, + {"link_uuid": {"uuid": "R2==R3"}}, + {"link_uuid": {"uuid": "R3==R4"}}, + {"link_uuid": {"uuid": "R4==R5"}}, + {"link_uuid": {"uuid": "R5==R6"}}, + {"link_uuid": {"uuid": "R6==R1"}}, + {"link_uuid": {"uuid": "R1==R7"}}, + {"link_uuid": {"uuid": "R3==R7"}}, + {"link_uuid": {"uuid": "R5==R7"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", 
"resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R6"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + 
{"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R7"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"}, + {"sample_types": [], "type": "copper", "uuid": "2/4"}, + {"sample_types": [], "type": "copper", "uuid": "2/5"}, + {"sample_types": [], "type": "copper", "uuid": "2/6"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1==R2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2==R3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3==R4"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4==R5"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R5==R6"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R6==R1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1==R7"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3==R7"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R5==R7"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}} + ] + } + ] +} 
\ No newline at end of file diff --git a/src/tests/tools/load_gen/run.sh b/src/tests/tools/load_gen/run.sh new file mode 100755 index 0000000000000000000000000000000000000000..b16808ab6905927728212185681e2a6d4a5135ba --- /dev/null +++ b/src/tests/tools/load_gen/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python -m tests.tools.load_gen diff --git a/src/tests/tools/load_gen/test_dlt_functional.py b/src/tests/tools/load_gen/test_dlt_functional.py new file mode 100644 index 0000000000000000000000000000000000000000..9c6c3d5ba65d538628d75b2c0a0010963357f8b7 --- /dev/null +++ b/src/tests/tools/load_gen/test_dlt_functional.py @@ -0,0 +1,73 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
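
Note: the functional test that follows re-declares the `record_*_to_dlt` helpers from `DltTools.py`. All four share one shape: wrap the entity ID together with the target domain `TopologyId` and a `delete` flag, then invoke the matching RPC. A generic version could look like this (a hypothetical refactor, not in the patch):

```python
# Hypothetical generic helper; the patch keeps four explicit functions.
from common.proto.context_pb2 import TopologyId
from common.proto.dlt_connector_pb2 import DltDeviceId

def record_entity_to_dlt(
    wrapper_cls, rpc, domain_id : TopologyId, field_name : str, entity_id, delete : bool = False
) -> None:
    wrapper = wrapper_cls()
    wrapper.topology_id.CopyFrom(domain_id)            # domain to record under
    getattr(wrapper, field_name).CopyFrom(entity_id)   # device_id / link_id / service_id / slice_id
    wrapper.delete = delete                            # True removes the record
    rpc(wrapper)

# Equivalent to record_device_to_dlt(client, domain_id, device_id):
#   record_entity_to_dlt(DltDeviceId, client.RecordDevice, domain_id, 'device_id', device_id)
```
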
+
+import sys
+from common.proto.context_pb2 import (
+    DEVICEOPERATIONALSTATUS_ENABLED, Device, DeviceId, LinkId, ServiceId, SliceId, TopologyId)
+from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId
+from common.tools.object_factory.Device import json_device
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from dlt.connector.client.DltConnectorClient import DltConnectorClient
+
+def record_device_to_dlt(
+    dlt_connector_client : DltConnectorClient, domain_id : TopologyId, device_id : DeviceId, delete : bool = False
+) -> None:
+    dlt_device_id = DltDeviceId()
+    dlt_device_id.topology_id.CopyFrom(domain_id) # pylint: disable=no-member
+    dlt_device_id.device_id.CopyFrom(device_id)   # pylint: disable=no-member
+    dlt_device_id.delete = delete
+    dlt_connector_client.RecordDevice(dlt_device_id)
+
+def record_link_to_dlt(
+    dlt_connector_client : DltConnectorClient, domain_id : TopologyId, link_id : LinkId, delete : bool = False
+) -> None:
+    dlt_link_id = DltLinkId()
+    dlt_link_id.topology_id.CopyFrom(domain_id)   # pylint: disable=no-member
+    dlt_link_id.link_id.CopyFrom(link_id)         # pylint: disable=no-member
+    dlt_link_id.delete = delete
+    dlt_connector_client.RecordLink(dlt_link_id)
+
+def record_service_to_dlt(
+    dlt_connector_client : DltConnectorClient, domain_id : TopologyId, service_id : ServiceId, delete : bool = False
+) -> None:
+    dlt_service_id = DltServiceId()
+    dlt_service_id.topology_id.CopyFrom(domain_id) # pylint: disable=no-member
+    dlt_service_id.service_id.CopyFrom(service_id) # pylint: disable=no-member
+    dlt_service_id.delete = delete
+    dlt_connector_client.RecordService(dlt_service_id)
+
+def record_slice_to_dlt(
+    dlt_connector_client : DltConnectorClient, domain_id : TopologyId, slice_id : SliceId, delete : bool = False
+) -> None:
+    dlt_slice_id = DltSliceId()
+    dlt_slice_id.topology_id.CopyFrom(domain_id)  # pylint: disable=no-member
+    dlt_slice_id.slice_id.CopyFrom(slice_id)      # pylint: disable=no-member
+    dlt_slice_id.delete = delete
+    dlt_connector_client.RecordSlice(dlt_slice_id)
+
+def main():
+    context_client = ContextClient()
+    dlt_connector_client = DltConnectorClient()
+
+    device = Device(**json_device('test-device', 'packet-router', DEVICEOPERATIONALSTATUS_ENABLED))
+    device_id = context_client.SetDevice(device)
+
+    dlt_domain_id = TopologyId(**json_topology_id('dlt-func-test'))
+    record_device_to_dlt(dlt_connector_client, dlt_domain_id, device_id, delete=False)
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/tests/tools/load_scenario/README.md b/src/tests/tools/load_scenario/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3845cbf01dda4385c431a3b52d381360e6dc5e9f
--- /dev/null
+++ b/src/tests/tools/load_scenario/README.md
@@ -0,0 +1,17 @@
+# Tool: Load Scenario
+
+Simple tool to populate the ETSI TeraFlowSDN controller with the same descriptors that can be loaded through the WebUI.
+
+## Example:
+
+Deploy the TeraFlowSDN controller with your specific settings:
+```bash
+cd ~/tfs-ctrl
+source my_deploy.sh
+./deploy.sh
+```
+
+Populate the TeraFlowSDN controller with your descriptor file:
+```bash
+./src/tests/tools/load_scenario/run.sh src/tests/tools/load_scenario/example_descriptors.json
+```
diff --git a/src/tests/tools/load_scenario/__init__.py b/src/tests/tools/load_scenario/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/tools/load_scenario/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/tools/load_scenario/__main__.py b/src/tests/tools/load_scenario/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2cd11919ab8afd116e6cab8d7d9a6d4fd3ce54b
--- /dev/null
+++ b/src/tests/tools/load_scenario/__main__.py
@@ -0,0 +1,37 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import logging, sys +from common.tests.LoadScenario import load_scenario_from_descriptor +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +def main(): + context_client = ContextClient() + device_client = DeviceClient() + service_client = ServiceClient() + slice_client = SliceClient() + + LOGGER.info('Loading scenario...') + load_scenario_from_descriptor(sys.argv[1], context_client, device_client, service_client, slice_client) + LOGGER.info('Done!') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/tests/tools/load_scenario/example_descriptors.json b/src/tests/tools/load_scenario/example_descriptors.json new file mode 100644 index 0000000000000000000000000000000000000000..5fb0c086749cab3343277c28a902b3db48651320 --- /dev/null +++ b/src/tests/tools/load_scenario/example_descriptors.json @@ -0,0 +1,229 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_uuid": {"uuid": "admin"} + }, + "device_ids": [ + {"device_uuid": {"uuid": "R1"}}, + {"device_uuid": {"uuid": "R2"}}, + {"device_uuid": {"uuid": "R3"}}, + {"device_uuid": {"uuid": "R4"}}, + {"device_uuid": {"uuid": "R5"}}, + {"device_uuid": {"uuid": "R6"}}, + {"device_uuid": {"uuid": "R7"}} + ], + "link_ids": [ + {"link_uuid": {"uuid": "R1==R2"}}, + {"link_uuid": {"uuid": "R2==R3"}}, + {"link_uuid": {"uuid": "R3==R4"}}, + {"link_uuid": {"uuid": "R4==R5"}}, + {"link_uuid": {"uuid": "R5==R6"}}, + {"link_uuid": {"uuid": "R6==R1"}}, + {"link_uuid": {"uuid": "R1==R7"}}, + {"link_uuid": {"uuid": "R3==R7"}}, + {"link_uuid": {"uuid": "R5==R7"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + 
{"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": 
"R6"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R7"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"}, + {"sample_types": [], "type": "copper", "uuid": "2/4"}, + {"sample_types": [], "type": "copper", "uuid": "2/5"}, + {"sample_types": [], "type": "copper", "uuid": "2/6"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1==R2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2==R3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3==R4"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4==R5"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R5==R6"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R6==R1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}} + ] + }, + { + "link_id": {"link_uuid": 
{"uuid": "R1==R7"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3==R7"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R5==R7"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}} + ] + } + ] +} \ No newline at end of file diff --git a/src/tests/tools/load_scenario/run.sh b/src/tests/tools/load_scenario/run.sh new file mode 100755 index 0000000000000000000000000000000000000000..0ec0c3725d27c1246b776fef8f89a57beb561555 --- /dev/null +++ b/src/tests/tools/load_scenario/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python -m tests.tools.load_scenario $1