Commit 56089f19 authored by Waleed Akbar

Changes in Kpi_value_api and added new monitoring namespace

- Added Prometheus monitoring deployment and updated test configurations
- Updated KPI VALUE API (writer and promWriter)
parent af6688ba
4 merge requests: !359 Draft: Release TeraFlowSDN 5.0, !346 Draft: support for restconf protocol, !345 Draft: support ipinfusion devices via netconf, !320 Resolve "(CTTC) Telemetry Enhancement"

Showing 190 additions and 54 deletions
@@ -215,6 +215,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 # Deploy Apache Kafka
 ./deploy/kafka.sh
+# Deploy Monitoring (Prometheus, Mimir, Grafana)
+./deploy/monitoring.sh
 # Expose Dashboard
 ./deploy/expose_dashboard.sh
...
#!/bin/bash
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
RELEASE_NAME="mon-prometheus"
NAMESPACE="monitoring"
CHART_REPO_NAME="prometheus-community"
CHART_REPO_URL="https://prometheus-community.github.io/helm-charts"
CHART_NAME="prometheus" # Chart name within the repo
VALUES_FILE="manifests/prometheus/prometheus.yaml"
echo ">>> Deploying Prometheus with the following configuration:"
echo "Adding/updating Helm repo: $CHART_REPO_NAME -> $CHART_REPO_URL"
helm repo add "$CHART_REPO_NAME" "$CHART_REPO_URL" || true
helm repo update
echo "Creating namespace '$NAMESPACE' if it doesn't exist..."
kubectl get namespace "$NAMESPACE" >/dev/null 2>&1 || kubectl create namespace "$NAMESPACE"
#------------------------------------------------------------------------------
# 3. Install or upgrade the Prometheus chart
# - If 'VALUES_FILE' is set, it will use it for custom configuration.
# - Otherwise, it will deploy with the chart defaults.
#------------------------------------------------------------------------------
if [ -n "$VALUES_FILE" ] && [ -f "$VALUES_FILE" ]; then
echo "Installing/Upgrading Prometheus with custom values from $VALUES_FILE..."
helm upgrade --install "$RELEASE_NAME" "$CHART_REPO_NAME/$CHART_NAME" \
--namespace "$NAMESPACE" \
--values "$VALUES_FILE"
else
echo "Installing/Upgrading Prometheus with default chart values..."
helm upgrade --install "$RELEASE_NAME" "$CHART_REPO_NAME/$CHART_NAME" \
--namespace "$NAMESPACE"
fi
echo "Waiting for Prometheus pods to be ready..."
kubectl rollout status deployment/"$RELEASE_NAME"-server -n "$NAMESPACE" || true
# echo "Listing deployed resources in namespace '$NAMESPACE':"
# kubectl get all -n "$NAMESPACE"
echo "<<< Prometheus deployment completed successfully!"
@@ -39,6 +39,8 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
+            - name: PUSHGATEWAY_URL
+              value: "http://mon-prometheus-prometheus-pushgateway.monitoring.svc.cluster.local:9091"
           envFrom:
             - secretRef:
                 name: kfk-kpi-data
...
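This environment variable is how the KPI Value Writer locates the Pushgateway; the writer change later in this commit reads it with an os.getenv fallback:

import os

# Falls back to the default service name when PUSHGATEWAY_URL is unset in the pod spec
gateway_url = os.getenv('PUSHGATEWAY_URL', 'prometheus-pushgateway.monitoring.svc.cluster.local:9091')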
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration for Prometheus components and server settings
# Global Prometheus configuration
alertmanager:
  enabled: false              # Default is true
kube-state-metrics:
  enabled: false              # Default is true
prometheus-node-exporter:
  enabled: false              # Default is true
prometheus-pushgateway:
  enabled: true               # Default is true

# Prometheus server-specific configuration
server:
  retention: "30d"
  logLevel: "debug"
  resources:
    requests:
      cpu: "250m"
      memory: "256Mi"
    limits:
      cpu: "1"
      memory: "1Gi"
  # Expose the Prometheus server via a Kubernetes service
  service:
    type: NodePort
    nodePort: 30090

extraScrapeConfigs:
  - job_name: 'pushgateway'
    static_configs:
      - targets:
          - 'prometheus-pushgateway.monitoring.svc.cluster.local:9091' # Pushgateway endpoint

# Global Prometheus settings:
global:
  scrape_interval: 10s
  evaluation_interval: 10s
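To confirm that the extra scrape job picks up pushed metrics, the Prometheus HTTP API can be queried through the NodePort configured above. A minimal sketch, not part of the commit, assuming the node is reachable as localhost and port 30090 is open:

import json, urllib.parse, urllib.request

# Ask Prometheus whether the pushgateway scrape target is up
params = urllib.parse.urlencode({'query': 'up{job="pushgateway"}'})
with urllib.request.urlopen('http://localhost:30090/api/v1/query?' + params) as resp:
    body = json.load(resp)

# 'status' should be 'success'; 'data.result' lists the Pushgateway target
print(body['status'], body['data']['result'])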
@@ -19,6 +19,7 @@ PROJECTDIR=`pwd`
 cd $PROJECTDIR/src

 export KFK_SERVER_ADDRESS='127.0.0.1:9092'
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     kpi_value_writer/tests/test_kpi_value_writer.py
@@ -25,5 +25,5 @@ export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_telemetr
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-python3 -m pytest --log-level=debug --log-cli-level=debug --verbose \
+python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
     telemetry/backend/tests/test_backend.py
@@ -18,10 +18,11 @@ PROJECTDIR=`pwd`
 cd $PROJECTDIR/src

-# python3 kpi_manager/tests/test_unitary.py
 export KFK_SERVER_ADDRESS='127.0.0.1:9092'
 CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
 export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_telemetry?sslmode=require"
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
     telemetry/frontend/tests/test_frontend.py
@@ -56,7 +56,7 @@ def delay_linear(initial=0, increment=0, maximum=None):
         return delay
     return compute

-def delay_exponential(initial=1, increment=1, maximum=None):
+def delay_exponential(initial=1.0, increment=1.0, maximum=None):
     def compute(num_try):
         delay = initial * pow(increment, (num_try - 1))
         if maximum is not None:
...
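For reference, initial acts as the base delay and increment as the exponential factor, so the switch to float defaults makes the computed delays floats from the first retry. A short worked example; the clamp against maximum is assumed, since that line is elided in the hunk above:

def delay_exponential(initial=1.0, increment=1.0, maximum=None):
    def compute(num_try):
        delay = initial * pow(increment, (num_try - 1))
        if maximum is not None:
            delay = min(delay, maximum)  # assumed clamp; elided in the hunk above
        return delay
    return compute

compute = delay_exponential(initial=0.5, increment=2.0, maximum=10.0)
print([compute(n) for n in range(1, 6)])  # [0.5, 1.0, 2.0, 4.0, 8.0]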
@@ -77,4 +77,4 @@ def create_kpi_filter_request():
     _create_kpi_filter_request.connection_id.append(connection_id_obj)
     _create_kpi_filter_request.link_id.append(link_id_obj)
-    return _create_kpi_filter_request
\ No newline at end of file
+    return _create_kpi_filter_request
@@ -15,25 +15,21 @@
 import json
 import logging
 import threading
-from confluent_kafka import KafkaError
-from confluent_kafka import Consumer as KafkaConsumer
 from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
-from common.proto.kpi_value_api_pb2 import KpiValue
 from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
 from common.Settings import get_service_port_grpc
 from common.Constants import ServiceNameEnum
 from common.tools.service.GenericGrpcService import GenericGrpcService
+from confluent_kafka import KafkaError
+from confluent_kafka import Consumer as KafkaConsumer
 from kpi_manager.client.KpiManagerClient import KpiManagerClient
-# -- test import --
-# from kpi_value_writer.tests.test_messages import create_kpi_descriptor_request
 from .MetricWriterToPrometheus import MetricWriterToPrometheus

 LOGGER = logging.getLogger(__name__)
-ACTIVE_CONSUMERS = []

 class KpiValueWriter(GenericGrpcService):
     def __init__(self, cls_name : str = __name__) -> None:
@@ -43,9 +39,8 @@ class KpiValueWriter(GenericGrpcService):
             'group.id'          : 'KpiValueWriter',
             'auto.offset.reset' : 'latest'})

-    def RunKafkaConsumer(self):
+    def install_servicers(self):
         thread = threading.Thread(target=self.KafkaKpiConsumer, args=())
-        ACTIVE_CONSUMERS.append(thread)
         thread.start()

     def KafkaKpiConsumer(self):
@@ -55,7 +50,6 @@ class KpiValueWriter(GenericGrpcService):
         consumer = self.kafka_consumer
         consumer.subscribe([KafkaTopic.VALUE.value])
         LOGGER.debug("Kafka Consumer starts listening on topic: {:}".format(KafkaTopic.VALUE.value))
-        print("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value))
         while True:
             raw_kpi = consumer.poll(1.0)
             if raw_kpi is None:
@@ -69,30 +63,21 @@
                 try:
                     kpi_value = json.loads(raw_kpi.value().decode('utf-8'))
                     LOGGER.info("Received KPI : {:}".format(kpi_value))
-                    print("Received KPI : {:}".format(kpi_value))
                     self.get_kpi_descriptor(kpi_value, kpi_manager_client, metric_writer)
-                except Exception as e:
-                    print("Error detail: {:}".format(e))
+                except:
+                    LOGGER.exception("Error detail: ")
                     continue

     def get_kpi_descriptor(self, kpi_value: str, kpi_manager_client, metric_writer):
-        print("--- START -----")
         kpi_id = KpiId()
-        kpi_id.kpi_id.uuid = kpi_value['kpi_uuid']
-        print("KpiId generated: {:}".format(kpi_id))
-        # print("Kpi manger client created: {:}".format(kpi_manager_client))
+        kpi_id.kpi_id.uuid = kpi_value['kpi_id'] # type: ignore
         try:
             kpi_descriptor_object = KpiDescriptor()
             kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id)
-            # TODO: why kpi_descriptor_object recevies a KpiDescriptor type object not Empty type object???
             if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid:
                 LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
-                print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
                 metric_writer.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value)
             else:
-                LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
-                print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
+                LOGGER.info("No KPI Descriptor found in Database for Kpi ID: {:}".format(kpi_id))
         except Exception as e:
             LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e))
-            print ("Unable to get KpiDescriptor. Error: {:}".format(e))
@@ -14,15 +14,20 @@
 # read Kafka stream from Kafka topic

+import os
 import logging
-from prometheus_client import Gauge
-from common.proto.kpi_sample_types_pb2 import KpiSampleType
-from common.proto.kpi_value_api_pb2 import KpiValue
-from common.proto.kpi_manager_pb2 import KpiDescriptor
+from prometheus_client import Gauge
+from prometheus_client.exposition import push_to_gateway
+from prometheus_client.registry import CollectorRegistry
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_manager_pb2 import KpiDescriptor

 LOGGER = logging.getLogger(__name__)
 PROM_METRICS = {}
+GATEWAY_URL = os.getenv('PUSHGATEWAY_URL', 'prometheus-pushgateway.monitoring.svc.cluster.local:9091')

 class MetricWriterToPrometheus:
     '''
@@ -30,7 +35,9 @@ class MetricWriterToPrometheus:
     cooked KPI value = KpiDescriptor (gRPC message) + KpiValue (gRPC message)
     '''
     def __init__(self):
-        pass
+        self.job_name    = 'kpivaluewriter'
+        self.registry    = CollectorRegistry()
+        self.gateway_url = GATEWAY_URL

     def merge_kpi_descriptor_and_kpi_value(self, kpi_descriptor, kpi_value):
         # Creating a dictionary from the kpi_descriptor's attributes
@@ -44,26 +51,27 @@
             'slice_id'      : kpi_descriptor.slice_id.slice_uuid.uuid,
             'connection_id' : kpi_descriptor.connection_id.connection_uuid.uuid,
             'link_id'       : kpi_descriptor.link_id.link_uuid.uuid,
-            'time_stamp'    : kpi_value.timestamp.timestamp,
-            'kpi_value'     : kpi_value.kpi_value_type.floatVal
+            'time_stamp'    : kpi_value["time_stamp"],
+            'kpi_value'     : kpi_value["kpi_value"]
         }
         LOGGER.debug("Cooked Kpi: {:}".format(cooked_kpi))
         return cooked_kpi
     def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: KpiValue):
         # merge both gRPC messages into a single variable.
         cooked_kpi = self.merge_kpi_descriptor_and_kpi_value(kpi_descriptor, kpi_value)
         tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'}
         metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude] # These values will be used as metric tags
         metric_name = cooked_kpi['kpi_sample_type']
         try:
             if metric_name not in PROM_METRICS: # Only register the metric if it doesn't already exist
                 PROM_METRICS[metric_name] = Gauge(
                     metric_name,
                     cooked_kpi['kpi_description'],
-                    metric_tags
+                    metric_tags,
+                    registry=self.registry
                 )
             LOGGER.debug("Metric is created with labels: {:}".format(metric_tags))
             PROM_METRICS[metric_name].labels(
                 kpi_id    = cooked_kpi['kpi_id'],
                 device_id = cooked_kpi['device_id'],
@@ -74,7 +82,11 @@
                 link_id    = cooked_kpi['link_id'],
                 time_stamp = cooked_kpi['time_stamp'],
             ).set(float(cooked_kpi['kpi_value']))
-            LOGGER.debug("Metric pushed to the endpoints: {:}".format(PROM_METRICS[metric_name]))
+            LOGGER.debug("Metric is being pushed to the Gateway ... : {:}".format(PROM_METRICS[metric_name]))
+            # Push to the Prometheus Gateway; Prometheus is preconfigured to scrape the metrics from the gateway
+            push_to_gateway(self.gateway_url, job=self.job_name, registry=self.registry)
+            LOGGER.debug("Metric pushed to Prometheus Gateway.")
         except ValueError as e:
             if 'Duplicated timeseries' in str(e):
...
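The ValueError branch exists because prometheus_client refuses to register two collectors with the same metric name in one CollectorRegistry, which is why PROM_METRICS caches the Gauge per sample type. A minimal reproduction, not part of the commit, with an illustrative metric name:

from prometheus_client import CollectorRegistry, Gauge

registry = CollectorRegistry()
Gauge('packets_received', 'demo metric', ['kpi_id'], registry=registry)
try:
    # Registering the same metric name again in the same registry fails
    Gauge('packets_received', 'demo metric', ['kpi_id'], registry=registry)
except ValueError as e:
    print(e)  # e.g. "Duplicated timeseries in CollectorRegistry: ..."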
@@ -13,7 +13,6 @@
 # limitations under the License.

 import logging, signal, sys, threading
-from prometheus_client import start_http_server
 from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
 from common.Settings import get_log_level

@@ -39,8 +38,6 @@ def main():
     grpc_service = KpiValueWriter()
     grpc_service.start()

-    start_http_server(10808)
-    LOGGER.debug("Prometheus client is started on port 10808")
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=1.0): pass
...
@@ -12,14 +12,35 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import pytest
+import time
 import logging
 from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
 from common.tools.kafka.Variables import KafkaTopic
-from test_messages import create_kpi_descriptor_request

+LOGGER = logging.getLogger(__name__)

+# -------- Fixtures ----------------
+@pytest.fixture(autouse=True)
+def log_all_methods(request):
+    '''
+    This fixture logs messages before and after each test function runs, indicating the start and end of the test.
+    The autouse=True parameter ensures that this logging happens automatically for all tests in the module.
+    '''
+    LOGGER.info(f" >>>>> Starting test: {request.node.name} ")
+    yield
+    LOGGER.info(f" <<<<< Finished test: {request.node.name} ")

+# @pytest.fixture(scope='module')
+# def kpi_manager_client():
+#     LOGGER.debug("Yielding KpiManagerClient ...")
+#     yield KpiManagerClient(host="10.152.183.203")
+#     LOGGER.debug("KpiManagerClient is terminated.")

-LOGGER = logging.getLogger(__name__)

 # -------- Initial Test ----------------
 def test_validate_kafka_topics():
@@ -27,7 +48,15 @@ def test_validate_kafka_topics():
     response = KafkaTopic.create_all_topics()
     assert isinstance(response, bool)

-def test_KafkaConsumer():
-    LOGGER.debug(" --->>> test_kafka_consumer: START <<<--- ")
-    # kpi_value_writer = KpiValueWriter()
-    # kpi_value_writer.RunKafkaConsumer()
+# --------------
+# NOT FOR GITHUB PIPELINE (Local testing only)
+# --------------
+# def test_KafkaConsumer(kpi_manager_client):
+#     # kpidescriptor = create_kpi_descriptor_request()
+#     # kpi_manager_client.SetKpiDescriptor(kpidescriptor)
+#     kpi_value_writer = KpiValueWriter()
+#     kpi_value_writer.KafkaKpiConsumer()
+#     LOGGER.debug(" waiting for timer to finish ")
+#     time.sleep(300)
@@ -25,7 +25,8 @@ def create_kpi_id_request():
 def create_kpi_descriptor_request(description: str = "Test Description"):
     _create_kpi_request = kpi_manager_pb2.KpiDescriptor()
-    _create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4())
+    # _create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4())
+    _create_kpi_request.kpi_id.kpi_id.uuid = "efef4d95-1cf1-43c4-9742-95c283dddddd"
     _create_kpi_request.kpi_description = description
     _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
     _create_kpi_request.device_id.device_uuid.uuid = 'DEV4'
...