Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: tfs/controller

Commits on Source (154)
Showing 496 additions and 87 deletions
......@@ -176,3 +176,6 @@ libyang/
# Other logs
**/logs/*.log.*
# PySpark checkpoints
src/analytics/.spark/*
......@@ -49,6 +49,6 @@ include:
- local: '/src/kpi_manager/.gitlab-ci.yml'
- local: '/src/kpi_value_api/.gitlab-ci.yml'
- local: '/src/kpi_value_writer/.gitlab-ci.yml'
- local: '/src/telemetry/.gitlab-ci.yml'
# This should be last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml'
......@@ -33,7 +33,7 @@ export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice n
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics"
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
......@@ -215,6 +215,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
# Deploy QuestDB
./deploy/qdb.sh
# Deploy Apache Kafka
./deploy/kafka.sh
# Expose Dashboard
./deploy/expose_dashboard.sh
......
......@@ -20,50 +20,71 @@
# If not already set, set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
# If not already set, set the port Apache Kafka server will be exposed to.
export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"}
# If not already set, set the redeploy flag; when the flag is YES, Apache Kafka is redeployed and all existing topics are lost.
export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
########################################################################################################################
# Automated steps start here
########################################################################################################################
# Constants
TMP_FOLDER="./tmp"
KFK_MANIFESTS_PATH="manifests/kafka"
KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
KFK_MANIFEST="02-kafka.yaml"
# Constants
TMP_FOLDER="./tmp"
KFK_MANIFESTS_PATH="manifests/kafka"
KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
KFK_MANIFEST="02-kafka.yaml"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
mkdir -p ${TMP_MANIFESTS_FOLDER}
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
mkdir -p ${TMP_MANIFESTS_FOLDER}
function kafka_deploy() {
# copy zookeeper and kafka manifest files to temporary manifest location
cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
# copy zookeeper and kafka manifest files to temporary manifest location
cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
# echo "Apache Kafka Namespace"
echo ">>> Delete Apache Kafka Namespace"
kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
echo "Apache Kafka Namespace"
echo ">>> Delete Apache Kafka Namespace"
kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
echo ">>> Create Apache Kafka Namespace"
kubectl create namespace ${KFK_NAMESPACE}
echo ">>> Create Apache Kafka Namespace"
kubectl create namespace ${KFK_NAMESPACE}
>> Deplying">
# echo ">>> Deploying Apache Kafka Zookeeper"
# Kafka zookeeper service should be deployed before the kafka service
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
>> Deplying">
echo ">>> Deploying Apache Kafka Zookeeper"
# Kafka zookeeper service should be deployed before the kafka service
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this hard-coded name may be replaced with a command that extracts the service name automatically
KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}')
KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this hard-coded name may be replaced with a command that extracts the service name automatically
KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}')
# Kafka service should be deployed after the zookeeper service
sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
# Kafka service should be deployed after the zookeeper service
sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
# echo ">>> Deploying Apache Kafka Broker"
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
echo ">>> Deploying Apache Kafka Broker"
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
>> Verifing">
# echo ">>> Verifying Apache Kafka deployment"
sleep 5
# KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods)
# if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then
# echo "Deployment Error: \n $KFK_PODS_STATUS"
# else
# echo "$KFK_PODS_STATUS"
# fi
}
>> Verifing">
echo ">>> Verifying Apache Kafka deployment"
sleep 10
KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods)
if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then
echo "Deployment Error: \n $KFK_PODS_STATUS"
echo "Apache Kafka"
echo ">>> Checking if Apache Kafka is deployed ... "
if [ "$KFK_REDEPLOY" == "YES" ]; then
echo ">>> Redeploying kafka namespace"
kafka_deploy
elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then
echo ">>> Apache Kafka already present; skipping step."
else
echo "$KFK_PODS_STATUS"
fi
\ No newline at end of file
>> Kafka namespace doesn't">
echo ">>> Kafka namespace doesn't exist. Deploying kafka namespace"
kafka_deploy
fi
echo
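A minimal usage sketch for this script, using the defaults shown above (invocation path as referenced from the deploy flow):

# Force a clean redeployment of Apache Kafka; note that all existing topics are lost
export KFK_NAMESPACE="kafka"
export KFK_SERVER_PORT="9092"
export KFK_REDEPLOY="YES"
./deploy/kafka.sh

# With KFK_REDEPLOY left empty, an existing Kafka namespace is detected and left untouched
export KFK_REDEPLOY=""
./deploy/kafka.sh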
......@@ -115,6 +115,17 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
# ----- Apache Kafka ------------------------------------------------------
# If not already set, set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
# If not already set, set the port Apache Kafka server will be exposed to.
export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"}
# If not already set, set the redeploy flag; when the flag is YES, Apache Kafka is redeployed and all existing topics are lost.
export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
########################################################################################################################
# Automated steps start here
########################################################################################################################
......@@ -147,7 +158,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type=
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for KPI Management"
echo "Create secret with CockroachDB data for KPI Management microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt" # TODO: replace with a specific configurable environment variable
kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
......@@ -159,6 +170,37 @@ kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --t
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for Telemetry microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_TELEMETRY="tfs_telemetry" # TODO: replace with a specific configurable environment variable
kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for Analytics microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_ANALYTICS="tfs_analytics" # TODO: replace with a specific configurable environment variable
kubectl create secret generic crdb-analytics --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_ANALYTICS} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with Apache Kafka data for KPI, Telemetry and Analytics microservices"
KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}')
kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \
--from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT}
printf "\n"
echo "Create secret with NATS data"
NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
if [ -z "$NATS_CLIENT_PORT" ]; then
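After this step, the newly created secrets can be verified in the TFS namespace; a quick check using the secret names created above:

kubectl --namespace ${TFS_K8S_NAMESPACE} get secrets crdb-kpi-data crdb-telemetry crdb-analytics kfk-kpi-data
kubectl --namespace ${TFS_K8S_NAMESPACE} describe secret kfk-kpi-data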
......@@ -234,15 +276,17 @@ for COMPONENT in $TFS_COMPONENTS; do
if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
$DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
elif [ "$COMPONENT" == "pathcomp" ]; then
elif [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
$DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
$DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
# next command is redundant, but helpful to keep cache updated between rebuilds
IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
$DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
if [ "$COMPONENT" == "pathcomp" ]; then
# next command is redundant, but helpful to keep cache updated between rebuilds
IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
$DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
fi
elif [ "$COMPONENT" == "dlt" ]; then
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
$DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
......@@ -255,7 +299,7 @@ for COMPONENT in $TFS_COMPONENTS; do
echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
if [ "$COMPONENT" == "pathcomp" ]; then
if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ] ; then
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
......@@ -306,7 +350,7 @@ for COMPONENT in $TFS_COMPONENTS; do
cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
fi
if [ "$COMPONENT" == "pathcomp" ]; then
if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4)
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
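For reference, a sketch of what the extended build branch effectively runs for the new components, assuming telemetry and analytics follow the same frontend/backend Dockerfile layout as pathcomp:

# Example expansion for COMPONENT=telemetry (analytics is analogous)
docker build -t "telemetry-frontend:${TFS_IMAGE_TAG}" -f ./src/telemetry/frontend/Dockerfile .
docker build -t "telemetry-backend:${TFS_IMAGE_TAG}" -f ./src/telemetry/backend/Dockerfile .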
......
# ContainerLab
The setup consists of a management network for configuring and managing nodes.
srl1 and srl2 are interconnected.
client1 is connected to srl1 and client2 to srl2.
Routing between client1 and client2 is set up via the Nokia SR Linux nodes.
## Management Network
Name: mgmt-net
Subnet: 172.100.100.0/24
## Node Kinds
Nokia SR Linux: Image ghcr.io/nokia/srlinux:23.10.3
Linux: Image ghcr.io/hellt/network-multitool
## Nodes
### Nokia SR Linux
- Type: ixr6
- CPU: 0.5
- Memory: 2GB
- Management IP: 172.100.100.101
The SR Linux CLI commands provided in _srl.cli_ enable system management and configure the gNMI server with OpenConfig models.
### Linux
client1 assigns IP 172.16.1.10/24 to eth1 and adds a route to 172.16.2.0/24 via 172.16.1.1; client2 mirrors this with 172.16.2.10/24 and a route to 172.16.1.0/24 via 172.16.2.1.
In the topology file, the clients are thus pre-configured with their respective interface IP addresses and routing-table entries.
### Links
- Connect srl1:e1-1 to srl2:e1-1
- Connect client1:eth1 to srl1:e1-2
- Connect client2:eth1 to srl2:e1-2
\ No newline at end of file
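A short deployment sketch for this topology (the topology file name is assumed; adjust it to the actual .clab.yml file in the repository):

sudo containerlab deploy -t tfs-scenario.clab.yml    # create the lab
sudo containerlab inspect -t tfs-scenario.clab.yml   # list nodes and their management IPs
sudo containerlab destroy -t tfs-scenario.clab.yml   # tear the lab down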
......@@ -83,19 +83,19 @@ $ssh admin@clab-tfs-scenario-srl1
# Check configurations done:
gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl1-nis.json
gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl1-ifs.json
gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl2-nis.json
gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl2-ifs.json
gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl1-nis.json
gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl1-ifs.json
gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/network-instances' > srl2-nis.json
gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path '/interfaces' > srl2-ifs.json
# Delete elements:
gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
gnmic -a 172.100.100.101 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
gnmic -a 172.100.100.102 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
gnmic -a clab-tfs-scenario-srl2 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
# Run gNMI Driver in standalone mode (advanced)
PYTHONPATH=./src python -m src.device.tests.test_gnmi
set / system management openconfig admin-state enable
set / system gnmi-server network-instance mgmt yang-models openconfig
......@@ -23,35 +23,41 @@ mgmt:
topology:
kinds:
srl:
image: ghcr.io/nokia/srlinux:23.3.1
nokia_srlinux:
image: ghcr.io/nokia/srlinux:23.10.3
linux:
image: ghcr.io/hellt/network-multitool
nodes:
srl1:
kind: srl
kind: nokia_srlinux
type: ixr6
cpu: 0.5
memory: 1GB
memory: 2GB
mgmt-ipv4: 172.100.100.101
#startup-config: srl1.cli
startup-config: srl.cli
srl2:
kind: srl
kind: nokia_srlinux
type: ixr6
cpu: 0.5
memory: 1GB
memory: 2GB
mgmt-ipv4: 172.100.100.102
#startup-config: srl2.cli
startup-config: srl.cli
client1:
kind: linux
cpu: 0.1
memory: 100MB
mgmt-ipv4: 172.100.100.201
exec:
- ip address add 172.16.1.10/24 dev eth1
- ip route add 172.16.2.0/24 via 172.16.1.1
client2:
kind: linux
cpu: 0.1
memory: 100MB
mgmt-ipv4: 172.100.100.202
exec:
- ip address add 172.16.2.10/24 dev eth1
- ip route add 172.16.1.0/24 via 172.16.2.1
links:
- endpoints: ["srl1:e1-1", "srl2:e1-1"]
......
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: analyticsservice
spec:
selector:
matchLabels:
app: analyticsservice
#replicas: 1
template:
metadata:
labels:
app: analyticsservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: frontend
image: labs.etsi.org:5050/tfs/controller/analytics-frontend:latest
imagePullPolicy: Always
ports:
- containerPort: 30080
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: crdb-analytics
- secretRef:
name: kfk-kpi-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30080"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30080"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
- name: backend
image: labs.etsi.org:5050/tfs/controller/analytics-backend:latest
imagePullPolicy: Always
ports:
- containerPort: 30090
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: kfk-kpi-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30090"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30090"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: analyticsservice
labels:
app: analyticsservice
spec:
type: ClusterIP
selector:
app: analyticsservice
ports:
- name: frontend-grpc
protocol: TCP
port: 30080
targetPort: 30080
- name: backend-grpc
protocol: TCP
port: 30090
targetPort: 30090
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: analyticsservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: analyticsservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
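Once the manifest is applied, deployment health can be checked with standard kubectl commands, using the labels and HPA name defined above:

kubectl --namespace ${TFS_K8S_NAMESPACE} get pods -l app=analyticsservice
kubectl --namespace ${TFS_K8S_NAMESPACE} get hpa analyticsservice-hpa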
......@@ -53,9 +53,9 @@ spec:
- name: KAFKA_LISTENERS
value: PLAINTEXT://:9092
- name: KAFKA_ADVERTISED_LISTENERS
value: PLAINTEXT://localhost:9092
value: PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092
image: wurstmeister/kafka
imagePullPolicy: IfNotPresent
name: kafka-broker
ports:
- containerPort: 9092
\ No newline at end of file
- containerPort: 9092
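The advertised listener now points at the in-cluster service name instead of localhost, so pods in other namespaces can reach the broker. A reachability sketch from a throw-away client pod (script path assumed from the wurstmeister/kafka image layout):

kubectl --namespace kafka run kfk-smoke --rm -it --restart=Never --image=wurstmeister/kafka --command -- \
  /opt/kafka/bin/kafka-topics.sh --bootstrap-server kafka-service.kafka.svc.cluster.local:9092 --list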
......@@ -39,6 +39,9 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: kfk-kpi-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30020"]
......
......@@ -39,6 +39,9 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: kfk-kpi-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30030"]
......
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: telemetryservice
spec:
selector:
matchLabels:
app: telemetryservice
#replicas: 1
template:
metadata:
labels:
app: telemetryservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: frontend
image: labs.etsi.org:5050/tfs/controller/telemetry-frontend:latest
imagePullPolicy: Always
ports:
- containerPort: 30050
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: crdb-telemetry
- secretRef:
name: kfk-kpi-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30050"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30050"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
- name: backend
image: labs.etsi.org:5050/tfs/controller/telemetry-backend:latest
imagePullPolicy: Always
ports:
- containerPort: 30060
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: kfk-kpi-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30060"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30060"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: telemetryservice
labels:
app: telemetryservice
spec:
type: ClusterIP
selector:
app: telemetryservice
ports:
- name: frontend-grpc
protocol: TCP
port: 30050
targetPort: 30050
- name: backend-grpc
protocol: TCP
port: 30060
targetPort: 30060
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: telemetryservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: telemetryservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
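A manual health check mirroring the probes above can be run against the frontend container of the deployment (namespace variable as used by the deploy scripts):

kubectl --namespace ${TFS_K8S_NAMESPACE} exec deploy/telemetryservice -c frontend -- \
  /bin/grpc_health_probe -addr=:30050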
......@@ -181,3 +181,8 @@ export GRAF_EXT_PORT_HTTP="3000"
# Set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE="kafka"
# Set the port Apache Kafka server will be exposed to.
export KFK_SERVER_PORT="9092"
# Set the flag to YES to redeploy Apache Kafka (all existing topics are lost)
export KFK_REDEPLOY=""
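With these variables in place, the usual TFS deployment flow picks Kafka up automatically; a sketch, assuming the standard entry-point script of this repository:

source my_deploy.sh    # exports TFS_*, KFK_* and related settings
./deploy/all.sh        # invokes deploy/kafka.sh, deploy/qdb.sh and deploy/tfs.sh among others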
......@@ -30,21 +30,25 @@ message AnalyzerId {
}
enum AnalyzerOperationMode {
ANALYZEROPERATIONMODE_BATCH = 0;
ANALYZEROPERATIONMODE_STREAMING = 1;
ANALYZEROPERATIONMODE_UNSPECIFIED = 0;
ANALYZEROPERATIONMODE_BATCH = 1;
ANALYZEROPERATIONMODE_STREAMING = 2;
}
// duration field may be added in analyzer...
message Analyzer {
string algorithm_name = 1; // The algorithm to be executed
repeated kpi_manager.KpiId input_kpi_ids = 2; // The KPI Ids to be processed by the analyzer
repeated kpi_manager.KpiId output_kpi_ids = 3; // The KPI Ids produced by the analyzer
AnalyzerOperationMode operation_mode = 4; // Operation mode of the analyzer
// In batch mode...
float batch_min_duration_s = 5; // ..., min duration to collect before executing batch
float batch_max_duration_s = 6; // ..., max duration collected to execute the batch
uint64 batch_min_size = 7; // ..., min number of samples to collect before executing batch
uint64 batch_max_size = 8; // ..., max number of samples collected to execute the batch
AnalyzerId analyzer_id = 1;
string algorithm_name = 2; // The algorithm to be executed
float duration_s = 3; // Terminate the data analytics thread after this duration (seconds); 0 means run indefinitely
repeated kpi_manager.KpiId input_kpi_ids = 4; // The KPI Ids to be processed by the analyzer
repeated kpi_manager.KpiId output_kpi_ids = 5; // The KPI Ids produced by the analyzer
AnalyzerOperationMode operation_mode = 6; // Operation mode of the analyzer
map<string, string> parameters = 7; // Add dictionary of (key, value) pairs such as (window_size, 10) etc.
// In batch mode...
float batch_min_duration_s = 8; // ..., min duration to collect before executing batch
float batch_max_duration_s = 9; // ..., max duration collected to execute the batch
uint64 batch_min_size = 10; // ..., min number of samples to collect before executing batch
uint64 batch_max_size = 11; // ..., max number of samples collected to execute the batch
}
message AnalyzerFilter {
......
......@@ -19,9 +19,9 @@ import "context.proto";
import "kpi_manager.proto";
service TelemetryFrontendService {
rpc StartCollector (Collector ) returns (CollectorId ) {}
rpc StopCollector (CollectorId ) returns (context.Empty) {}
rpc SelectCollectors(CollectorFilter) returns (CollectorList) {}
rpc StartCollector (Collector ) returns (CollectorId ) {}
rpc StopCollector (CollectorId ) returns (context.Empty) {}
rpc SelectCollectors (CollectorFilter) returns (CollectorList) {}
}
message CollectorId {
......@@ -29,10 +29,12 @@ message CollectorId {
}
message Collector {
CollectorId collector_id = 1; // The Collector ID
kpi_manager.KpiId kpi_id = 2; // The KPI Id to be associated to the collected samples
float duration_s = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely
float interval_s = 4; // Interval between collected samples
CollectorId collector_id = 1; // The Collector ID
kpi_manager.KpiId kpi_id = 2; // The KPI Id to be associated to the collected samples
float duration_s = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely
float interval_s = 4; // Interval between collected samples
context.Timestamp start_time = 5; // Timestamp when the Collector starts execution
context.Timestamp end_time = 6; // Timestamp when the Collector stops execution
}
message CollectorFilter {
......
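A quick way to validate the updated message definitions is to compile them with protoc (the file names below are assumptions about the proto/ folder layout):

protoc --proto_path=proto --descriptor_set_out=/dev/null \
  proto/analytics_frontend.proto proto/telemetry_frontend.proto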
......@@ -17,10 +17,8 @@
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
# RCFILE=$PROJECTDIR/coverage/.coveragerc
# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
# kpi_manager/tests/test_unitary.py
RCFILE=$PROJECTDIR/coverage/.coveragerc
python3 -m pytest --log-cli-level=INFO --verbose \
telemetry/database/tests/managementDBtests.py
CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
analytics/tests/test_analytics_db.py
#!/bin/bash
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
analytics/frontend/tests/test_frontend.py
......@@ -24,7 +24,7 @@ cd $PROJECTDIR/src
# python3 kpi_manager/tests/test_unitary.py
RCFILE=$PROJECTDIR/coverage/.coveragerc
CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
kpi_manager/tests/test_kpi_manager.py