Commit 09b235e4 authored by Pablo Armingol

Merge branch 'develop' of https://labs.etsi.org/rep/tfs/controller into feat/173-tid-add-inventory-nbi-to-webui
parents 0762893a ed989581
Related merge requests: !294 Release TeraFlowSDN 4.0, !251 Resolve "(TID) Add inventory NBI to WebUI"
Showing changed files with 839 additions and 40 deletions
@@ -27,7 +27,47 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
# By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"}
# Uncomment to activate Monitoring (old)
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
# Uncomment to activate Optical Controller
# To manage optical connections, "service" requires "opticalcontroller" to be deployed
# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
# BEFORE="${TFS_COMPONENTS% service*}"
# AFTER="${TFS_COMPONENTS#* service}"
# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
#fi
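# Illustration (hypothetical, not part of the deploy script) of the parameter expansions above,
# assuming the default component list:
#   TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
#   BEFORE="${TFS_COMPONENTS% service*}"   # -> "context device pathcomp"
#   AFTER="${TFS_COMPONENTS#* service}"    # -> " slice nbi webui load_generator" (leading space is harmless)
#   Re-exported value: "context device pathcomp opticalcontroller service slice nbi webui load_generator"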
# Uncomment to activate ZTP
#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
# Uncomment to activate Policy Manager
#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
# Uncomment to activate Optical CyberSecurity
#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
# Uncomment to activate L3 CyberSecurity
#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
# Uncomment to activate TE
#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
# Uncomment to activate Forecaster
#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
# Uncomment to activate E2E Orchestrator
#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -67,8 +107,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
# If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
-# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
-# checking/deploying CockroachDB.
# - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
@@ -80,7 +118,7 @@ export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
# If not already set, disable flag for dropping database, if it exists.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
-# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
+# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while
# checking/deploying CockroachDB.
export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""}
@@ -102,6 +140,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
# If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
# development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
# with 3 replicas (set by default) will be deployed. It is convenient for production and
# provides scalability features.
export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
# If not already set, disable flag for re-deploying NATS from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION!
# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -137,7 +183,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
# If not already set, disable flag for dropping tables if they exist.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while
# checking/deploying QuestDB.
export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
......
@@ -37,8 +37,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
# If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
-# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
-# checking/deploying CockroachDB.
# - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
@@ -48,7 +46,7 @@ export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
# Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html
export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
-# If not already set, disable flag for dropping database if exists.
+# If not already set, disable flag for dropping database, if it exists.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while
# checking/deploying CockroachDB.
@@ -79,7 +77,7 @@ function crdb_deploy_single() {
kubectl create namespace ${CRDB_NAMESPACE}
echo
-echo "CockroachDB (single-node)"
+echo "CockroachDB (single-mode)"
echo ">>> Checking if CockroachDB is deployed..."
if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
echo ">>> CockroachDB is present; skipping step."
@@ -139,7 +137,7 @@ function crdb_deploy_single() {
}
function crdb_undeploy_single() {
-echo "CockroachDB"
+echo "CockroachDB (single-mode)"
echo ">>> Checking if CockroachDB is deployed..."
if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
echo ">>> Undeploy CockroachDB"
@@ -223,7 +221,7 @@ function crdb_deploy_cluster() {
kubectl create namespace ${CRDB_NAMESPACE}
echo
-echo "CockroachDB"
+echo "CockroachDB (cluster-mode)"
echo ">>> Checking if CockroachDB is deployed..."
if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
echo ">>> CockroachDB is present; skipping step."
@@ -319,7 +317,7 @@ function crdb_undeploy_cluster() {
fi
echo
-echo "CockroachDB"
+echo "CockroachDB (cluster-mode)"
echo ">>> Checking if CockroachDB is deployed..."
if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
echo ">>> Undeploy CockroachDB"
......
#!/bin/bash
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Read deployment settings
########################################################################################################################
# If not already set, set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
# Constants
TMP_FOLDER="./tmp"
KFK_MANIFESTS_PATH="manifests/kafka"
KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
KFK_MANIFEST="02-kafka.yaml"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
mkdir -p ${TMP_MANIFESTS_FOLDER}
# copy zookeeper and kafka manifest files to temporary manifest location
cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
echo "Apache Kafka Namespace"
echo ">>> Delete Apache Kafka Namespace"
kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
echo ">>> Create Apache Kafka Namespace"
kubectl create namespace ${KFK_NAMESPACE}
echo ">>> Deplying Apache Kafka Zookeeper"
# Kafka zookeeper service should be deployed before the kafka service
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically
KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}')
# Kafka service should be deployed after the zookeeper service
sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
echo ">>> Deploying Apache Kafka Broker"
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
echo ">>> Verifing Apache Kafka deployment"
sleep 10
KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods)
if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then
echo "Deployment Error: \n $KFK_PODS_STATUS"
else
echo "$KFK_PODS_STATUS"
fi
\ No newline at end of file
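The readiness check above combines a fixed sleep with a grep over the pod listing. A minimal alternative sketch, assuming the app=zookeeper and app=kafka-broker labels defined in the manifests further below, waits explicitly for the pods to become Ready:

# Hypothetical alternative readiness check (not part of the committed script)
kubectl --namespace "${KFK_NAMESPACE}" wait --for=condition=Ready --timeout=300s pod -l app=zookeeper
kubectl --namespace "${KFK_NAMESPACE}" wait --for=condition=Ready --timeout=300s pod -l app=kafka-broker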
@@ -27,6 +27,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
# If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
# development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
# with 3 replicas (set by default) will be deployed. It is convenient for production and
# provides scalability features.
export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
# If not already set, disable flag for re-deploying NATS from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION!
# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -37,6 +45,14 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
# Automated steps start here
########################################################################################################################
# Constants
TMP_FOLDER="./tmp"
NATS_MANIFESTS_PATH="manifests/nats"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
function nats_deploy_single() {
echo "NATS Namespace"
echo ">>> Create NATS Namespace (if missing)"
@@ -47,18 +63,86 @@ function nats_deploy_single() {
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
echo
echo "Install NATS (single-mode)"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
echo ">>> NATS is present; skipping step."
else
echo ">>> Deploy NATS"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true
echo ">>> Waiting NATS statefulset to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
printf "%c" "."
sleep 1
done
# Wait for statefulset condition "Available=True" does not work
# Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
# "error: readyReplicas is not found"
# Workaround: Check the pods are ready
#echo ">>> NATS statefulset created. Waiting for readiness condition..."
#kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats
#kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
# statefulset/nats
echo ">>> NATS statefulset created. Waiting NATS pods to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do
printf "%c" "."
sleep 1
done
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
fi
echo
echo "NATS Port Mapping"
echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}'
CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
echo
echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})"
NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}')
PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}'
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}'
CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
echo
}
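# Hypothetical verification (not part of the committed script): after nats_deploy_single runs,
# the patched MicroK8s ingress objects can be inspected with:
#   kubectl get configmap nginx-ingress-tcp-microk8s-conf --namespace ingress -o yaml
#   kubectl get daemonset nginx-ingress-microk8s-controller --namespace ingress \
#     -o jsonpath='{.spec.template.spec.containers[0].ports}'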
function nats_deploy_cluster() {
echo "NATS Namespace"
echo ">>> Create NATS Namespace (if missing)"
kubectl create namespace ${NATS_NAMESPACE}
echo
echo "Add NATS Helm Chart"
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
echo
echo "Upgrade NATS Helm Chart" echo "Upgrade NATS Helm Chart"
helm3 repo update nats helm3 repo update nats
echo echo
echo "Install NATS (single-node)" echo "Install NATS (cluster-mode)"
echo ">>> Checking if NATS is deployed..." echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
echo ">>> NATS is present; skipping step." echo ">>> NATS is present; skipping step."
else else
echo ">>> Deploy NATS" echo ">>> Deploy NATS"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
echo ">>> Waiting NATS statefulset to be created..." echo ">>> Waiting NATS statefulset to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
printf "%c" "." printf "%c" "."
...@@ -78,7 +162,17 @@ function nats_deploy_single() { ...@@ -78,7 +162,17 @@ function nats_deploy_single() {
printf "%c" "." printf "%c" "."
sleep 1 sleep 1
done done
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do
printf "%c" "."
sleep 1
done
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do
printf "%c" "."
sleep 1
done
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2
fi
echo
@@ -110,7 +204,7 @@ function nats_deploy_single() {
echo
}
-function nats_undeploy_single() {
+function nats_undeploy() {
echo "NATS"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
@@ -128,7 +222,13 @@ function nats_undeploy_single() {
}
if [ "$NATS_REDEPLOY" == "YES" ]; then
-nats_undeploy_single
+nats_undeploy
fi
-nats_deploy_single
+if [ "$NATS_DEPLOY_MODE" == "single" ]; then
nats_deploy_single
elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then
nats_deploy_cluster
else
echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE"
fi
\ No newline at end of file
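With the dispatch above, the NATS deployment mode is selected purely through environment variables. A usage sketch (illustrative only; it assumes the script is invoked from the repository root as ./deploy/nats.sh, as referenced in the deployment settings below):

# Hypothetical usage
export NATS_DEPLOY_MODE="cluster"   # or "single" (the default)
export NATS_REDEPLOY="YES"          # WARNING: drops the existing message broker content
./deploy/nats.sh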
@@ -44,7 +44,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
# If not already set, disable flag for dropping tables if they exist.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variables
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped
# while checking/deploying QuestDB.
export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
......
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
# By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"}
# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -137,10 +137,23 @@ printf "\n"
echo "Create secret with CockroachDB data"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_CONTEXT=${CRDB_DATABASE} # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
---from-literal=CRDB_DATABASE=${CRDB_DATABASE} \
+--from-literal=CRDB_DATABASE=${CRDB_DATABASE_CONTEXT} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for KPI Management"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt" # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_KPI_MGMT} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
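# Hypothetical check (not part of the committed script): confirm the KPI secret points at the
# expected database, assuming TFS_K8S_NAMESPACE="tfs":
#   kubectl get secret crdb-kpi-data --namespace tfs -o jsonpath='{.data.CRDB_DATABASE}' | base64 -d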
@@ -204,6 +217,14 @@ if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
DOCKER_BUILD="docker buildx build"
fi
LINKERD_STATUS="$(microk8s status -a linkerd)"
if [[ ${LINKERD_STATUS} =~ "enabled" ]]; then
echo "LinkerD installed: workloads will be injected"
else
echo "LinkerD not installed"
fi
printf "\n"
for COMPONENT in $TFS_COMPONENTS; do
echo "Processing '$COMPONENT' component..."
@@ -279,8 +300,11 @@ for COMPONENT in $TFS_COMPONENTS; do
echo " Adapting '$COMPONENT' manifest file..."
MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
-# cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+if [[ ${LINKERD_STATUS} =~ "enabled" ]]; then
cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
else
cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
fi
if [ "$COMPONENT" == "pathcomp" ]; then
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
......
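Regarding the LinkerD conditional above: once the components are deployed, the injection can be verified by listing the containers of each pod; injected pods show an extra linkerd-proxy container next to the server container defined in the manifests. A sketch, assuming TFS_K8S_NAMESPACE="tfs":

# Hypothetical verification (illustrative only)
kubectl get pods --namespace tfs -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.containers[*].name}{"\n"}{end}'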
@@ -22,6 +22,7 @@
ALL_COMPONENTS="context device service nbi monitoring webui interdomain slice"
ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector"
ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
ALL_COMPONENTS="${ALL_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
# Some components require libyang built from source code
......
@@ -39,8 +39,8 @@ spec:
cpu: 8
memory: 8Gi
tlsEnabled: true
# You can set either a version of the db or a specific image name
# cockroachDBVersion: v22.2.8
image:
name: cockroachdb/cockroach:v22.2.8
# nodes refers to the number of crdb pods that are created
@@ -49,21 +49,16 @@ spec:
additionalLabels:
crdb: is-cool
# affinity is a new API field that is behind a feature gate that is
# disabled by default. To enable please see the operator.yaml file.
# The affinity field will accept any podSpec affinity rule.
-# affinity:
-#   podAntiAffinity:
-#     preferredDuringSchedulingIgnoredDuringExecution:
-#     - weight: 100
-#       podAffinityTerm:
-#         labelSelector:
-#           matchExpressions:
-#           - key: app.kubernetes.io/instance
-#             operator: In
-#             values:
-#             - cockroachdb
-#           topologyKey: kubernetes.io/hostname
+topologySpreadConstraints:
+- maxSkew: 1
+  topologyKey: kubernetes.io/hostname
+  whenUnsatisfiable: ScheduleAnyway
+  labelSelector:
+    matchLabels:
+      app.kubernetes.io/instance: cockroachdb
# nodeSelectors used to match against
# nodeSelector:
......
@@ -381,6 +381,7 @@ spec:
spec:
containers:
- args:
- -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true
- -zap-log-level
- info
env:
......
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
labels:
app: zookeeper-service
name: zookeeper-service
namespace: kafka
spec:
type: NodePort
ports:
- name: zookeeper-port
port: 2181
nodePort: 30181
targetPort: 2181
selector:
app: zookeeper
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: zookeeper
name: zookeeper
namespace: kafka
spec:
replicas: 1
selector:
matchLabels:
app: zookeeper
template:
metadata:
labels:
app: zookeeper
spec:
containers:
- image: wurstmeister/zookeeper
imagePullPolicy: IfNotPresent
name: zookeeper
ports:
- containerPort: 2181
\ No newline at end of file
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
labels:
app: kafka-broker
name: kafka-service
namespace: kafka
spec:
ports:
- port: 9092
selector:
app: kafka-broker
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kafka-broker
name: kafka-broker
namespace: kafka
spec:
replicas: 1
selector:
matchLabels:
app: kafka-broker
template:
metadata:
labels:
app: kafka-broker
spec:
hostname: kafka-broker
containers:
- env:
- name: KAFKA_BROKER_ID
value: "1"
- name: KAFKA_ZOOKEEPER_CONNECT
value: <ZOOKEEPER_INTERNAL_IP>:2181
- name: KAFKA_LISTENERS
value: PLAINTEXT://:9092
- name: KAFKA_ADVERTISED_LISTENERS
value: PLAINTEXT://localhost:9092
image: wurstmeister/kafka
imagePullPolicy: IfNotPresent
name: kafka-broker
ports:
- containerPort: 9092
\ No newline at end of file
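A quick smoke test for this broker configuration (a sketch; it assumes kafka-topics.sh is on the PATH inside the wurstmeister/kafka image and that the broker answers on the advertised localhost:9092 listener from inside its own pod):

# Hypothetical smoke test (illustrative only)
kubectl --namespace kafka exec deploy/kafka-broker -- kafka-topics.sh --bootstrap-server localhost:9092 --list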
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: kpi-managerservice
spec:
selector:
matchLabels:
app: kpi-managerservice
#replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-outbound-ports: "4222"
labels:
app: kpi-managerservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/kpi_manager:latest
imagePullPolicy: Always
ports:
- containerPort: 30010
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: crdb-kpi-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30010"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30010"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: kpi-managerservice
labels:
app: kpi-managerservice
spec:
type: ClusterIP
selector:
app: kpi-managerservice
ports:
- name: grpc
protocol: TCP
port: 30010
targetPort: 30010
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: kpi-managerservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: kpi-managerservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
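The readiness and liveness probes above invoke grpc_health_probe against the gRPC port. The same binary can be run by hand to debug a failing probe (a sketch, assuming the component is deployed in the "tfs" namespace):

# Hypothetical manual probe (illustrative only)
kubectl --namespace tfs exec deploy/kpi-managerservice -- /bin/grpc_health_probe -addr=:30010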
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: kpi-value-apiservice
spec:
selector:
matchLabels:
app: kpi-value-apiservice
#replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-outbound-ports: "4222"
labels:
app: kpi-value-apiservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/kpi_value_api:latest
imagePullPolicy: Always
ports:
- containerPort: 30020
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30020"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30020"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: kpi-value-apiservice
labels:
app: kpi-value-apiservice
spec:
type: ClusterIP
selector:
app: kpi-value-apiservice
ports:
- name: grpc
protocol: TCP
port: 30020
targetPort: 30020
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: kpi-value-apiservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: kpi-value-apiservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: kpi-value-writerservice
spec:
selector:
matchLabels:
app: kpi-value-writerservice
#replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-outbound-ports: "4222"
labels:
app: kpi-value-writerservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/kpi_value_writer:latest
imagePullPolicy: Always
ports:
- containerPort: 30030
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30030"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30030"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: kpi-value-writerservice
labels:
app: kpi-value-writerservice
spec:
type: ClusterIP
selector:
app: kpi-value-writerservice
ports:
- name: grpc
protocol: TCP
port: 30030
targetPort: 30030
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: kpi-value-writerservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: kpi-value-writerservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
container:
image:
tags: 2.9-alpine
env:
# different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB
# should be ~90% of memory limit
GOMEMLIMIT: 400MiB
merge:
# recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters
resources:
requests:
cpu: 1
memory: 500Mi
limits:
cpu: 1
memory: 1Gi
config:
cluster:
enabled: true
replicas: 3
jetstream:
enabled: true
fileStore:
pvc:
size: 4Gi
# Force one pod per node, if possible
podTemplate:
topologySpreadConstraints:
kubernetes.io/hostname:
maxSkew: 1
whenUnsatisfiable: ScheduleAnyway
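Before installing, the chart can be rendered locally with these values to review the resulting StatefulSet (a sketch; it assumes the nats Helm repo has already been added, as done in deploy/nats.sh, and NATS_NAMESPACE="nats"):

# Hypothetical dry-run (illustrative only)
helm3 template nats nats/nats --namespace nats -f manifests/nats/cluster.yaml | grep -E 'replicas:|GOMEMLIMIT|cpu:|memory:'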
@@ -18,6 +18,11 @@ metadata:
name: tfs-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/limit-rps: "50"
nginx.ingress.kubernetes.io/limit-connections: "50"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "50"
nginx.ingress.kubernetes.io/proxy-send-timeout: "50"
nginx.ingress.kubernetes.io/proxy-read-timeout: "50"
spec:
rules:
- http:
......
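The annotations added above enable per-client rate limiting (50 requests per second, 50 concurrent connections) and 50-second proxy timeouts on the TFS ingress. The rate limit can be observed with a simple request loop (a sketch; host, port and path are placeholders for an actually exposed TFS endpoint); nginx starts answering 503 once the limit plus its burst allowance is exceeded:

# Hypothetical rate-limit check (illustrative only)
for i in $(seq 1 200); do curl -s -o /dev/null -w '%{http_code}\n' "http://127.0.0.1:80/webui/"; done | sort | uniq -c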
@@ -117,3 +117,25 @@ spec:
- name: grafana
port: 3000
targetPort: 3000
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: webuiservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: webuiservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 50
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
@@ -22,9 +22,12 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
-# Uncomment to activate Monitoring
+# Uncomment to activate Monitoring (old)
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
@@ -69,7 +72,7 @@ export TFS_K8S_NAMESPACE="tfs"
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
# Uncomment to monitor performance of components
-export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
# Uncomment when deploying Optical CyberSecurity
#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
@@ -123,6 +126,10 @@ export NATS_EXT_PORT_CLIENT="4222"
# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP="8222"
# Set NATS installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/nats.sh for additional details
export NATS_DEPLOY_MODE="single"
# Disable flag for re-deploying NATS from scratch.
export NATS_REDEPLOY=""
@@ -167,3 +174,10 @@ export PROM_EXT_PORT_HTTP="9090"
# Set the external port Grafana HTTP Dashboards will be exposed to.
export GRAF_EXT_PORT_HTTP="3000"
# ----- Apache Kafka -----------------------------------------------------------
# Set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE="kafka"
@@ -46,6 +46,7 @@ message AclMatch {
uint32 dst_port = 6;
uint32 start_mpls_label = 7;
uint32 end_mpls_label = 8;
string tcp_flags = 9;
}
message AclAction {
......
// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package analytics_frontend;
import "context.proto";
import "kpi_manager.proto";
//import "kpi_sample_types.proto";
service AnalyticsFrontendService {
rpc StartAnalyzer (Analyzer ) returns (AnalyzerId ) {}
rpc StopAnalyzer (AnalyzerId ) returns (context.Empty) {}
rpc SelectAnalyzers(AnalyzerFilter) returns (AnalyzerList ) {}
}
message AnalyzerId {
context.Uuid analyzer_id = 1;
}
enum AnalyzerOperationMode {
ANALYZEROPERATIONMODE_BATCH = 0;
ANALYZEROPERATIONMODE_STREAMING = 1;
}
message Analyzer {
string algorithm_name = 1; // The algorithm to be executed
repeated kpi_manager.KpiId input_kpi_ids = 2; // The KPI Ids to be processed by the analyzer
repeated kpi_manager.KpiId output_kpi_ids = 3; // The KPI Ids produced by the analyzer
AnalyzerOperationMode operation_mode = 4; // Operation mode of the analyzer
// In batch mode...
float batch_min_duration_s = 5; // ..., min duration to collect before executing batch
float batch_max_duration_s = 6; // ..., max duration collected to execute the batch
uint64 batch_min_size = 7; // ..., min number of samples to collect before executing batch
uint64 batch_max_size = 8; // ..., max number of samples collected to execute the batch
}
message AnalyzerFilter {
// Analyzers that fulfill the filter are those that match ALL the following fields.
// An empty list means: any value is accepted.
// All fields empty means: list all Analyzers
repeated AnalyzerId analyzer_id = 1;
repeated string algorithm_names = 2;
repeated kpi_manager.KpiId input_kpi_ids = 3;
repeated kpi_manager.KpiId output_kpi_ids = 4;
//repeated kpi_sample_types.KpiSampleType kpi_sample_type = 5; // Not implemented
//repeated context.DeviceId device_id = 6; // Not implemented
//repeated context.EndPointId endpoint_id = 7; // Not implemented
//repeated context.ServiceId service_id = 8; // Not implemented
//repeated context.SliceId slice_id = 9; // Not implemented
//repeated context.ConnectionId connection_id = 10; // Not implemented
//repeated context.LinkId link_id = 11; // Not implemented
}
message AnalyzerList {
repeated Analyzer analyzer_list = 1;
}