Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision

Target

Select target project
  • tfs/controller
1 result
Select Git revision
Show changes
Commits on Source (154)
Showing
with 317 additions and 225 deletions
...@@ -27,6 +27,7 @@ include: ...@@ -27,6 +27,7 @@ include:
- local: '/src/context/.gitlab-ci.yml' - local: '/src/context/.gitlab-ci.yml'
- local: '/src/device/.gitlab-ci.yml' - local: '/src/device/.gitlab-ci.yml'
- local: '/src/service/.gitlab-ci.yml' - local: '/src/service/.gitlab-ci.yml'
- local: '/src/qkd_app/.gitlab-ci.yml'
- local: '/src/dbscanserving/.gitlab-ci.yml' - local: '/src/dbscanserving/.gitlab-ci.yml'
- local: '/src/opticalattackmitigator/.gitlab-ci.yml' - local: '/src/opticalattackmitigator/.gitlab-ci.yml'
- local: '/src/opticalattackdetector/.gitlab-ci.yml' - local: '/src/opticalattackdetector/.gitlab-ci.yml'
...@@ -54,6 +55,8 @@ include: ...@@ -54,6 +55,8 @@ include:
- local: '/src/qos_profile/.gitlab-ci.yml' - local: '/src/qos_profile/.gitlab-ci.yml'
- local: '/src/vnt_manager/.gitlab-ci.yml' - local: '/src/vnt_manager/.gitlab-ci.yml'
- local: '/src/e2e_orchestrator/.gitlab-ci.yml' - local: '/src/e2e_orchestrator/.gitlab-ci.yml'
- local: '/src/ztp_server/.gitlab-ci.yml'
- local: '/src/osm_client/.gitlab-ci.yml'
# This should be last one: end-to-end integration tests # This should be last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml' - local: '/src/tests/.gitlab-ci.yml'
...@@ -13,14 +13,17 @@ ...@@ -13,14 +13,17 @@
# limitations under the License. # limitations under the License.
coverage==6.3 coverage==6.3
grpcio==1.47.* # grpcio==1.47.*
grpcio==1.60.0
grpcio-health-checking==1.47.* grpcio-health-checking==1.47.*
grpcio-reflection==1.47.* grpcio-reflection==1.47.*
grpcio-tools==1.47.* # grpcio-tools==1.47.*
grpcio-tools==1.60.0
grpclib==0.4.4 grpclib==0.4.4
prettytable==3.5.0 prettytable==3.5.0
prometheus-client==0.13.0 prometheus-client==0.13.0
protobuf==3.20.* # protobuf==3.20.*
protobuf==4.21.6
pytest==6.2.5 pytest==6.2.5
pytest-benchmark==3.4.1 pytest-benchmark==3.4.1
python-dateutil==2.8.2 python-dateutil==2.8.2
......
...@@ -151,6 +151,26 @@ export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"} ...@@ -151,6 +151,26 @@ export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
export NATS_REDEPLOY=${NATS_REDEPLOY:-""} export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
# ----- Apache Kafka -----------------------------------------------------------
# Deployment settings for the Apache Kafka message broker. Each variable keeps
# any value already exported by the caller and only falls back to the default.

# If not already set, set the namespace where Kafka will be deployed.
export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}

# If not already set, set the external port Kafka Client interface will be exposed to.
export KFK_EXT_PORT_CLIENT=${KFK_EXT_PORT_CLIENT:-"9092"}

# If not already set, set Kafka installation mode. Accepted values are: 'single'.
# - If KFK_DEPLOY_MODE is "single", Kafka is deployed in single node mode. It is convenient for
#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# NOTE: Production mode is still not supported. Will be provided in the future.
export KFK_DEPLOY_MODE=${KFK_DEPLOY_MODE:-"single"}

# If not already set, disable flag for re-deploying Kafka from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
# If KFK_REDEPLOY is "YES", the message broker will be dropped while checking/deploying Kafka.
export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
# ----- QuestDB ---------------------------------------------------------------- # ----- QuestDB ----------------------------------------------------------------
# If not already set, set the namespace where QuestDB will be deployed. # If not already set, set the namespace where QuestDB will be deployed.
...@@ -215,8 +235,8 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} ...@@ -215,8 +235,8 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
# Deploy Apache Kafka # Deploy Apache Kafka
./deploy/kafka.sh ./deploy/kafka.sh
#Deploy Monitoring (Prometheus, Mimir, Grafana) #Deploy Monitoring (Prometheus Gateway, Prometheus)
./deploy/monitoring.sh # ./deploy/monitoring.sh
# Expose Dashboard # Expose Dashboard
./deploy/expose_dashboard.sh ./deploy/expose_dashboard.sh
......
...@@ -66,7 +66,7 @@ CRDB_MANIFESTS_PATH="manifests/cockroachdb" ...@@ -66,7 +66,7 @@ CRDB_MANIFESTS_PATH="manifests/cockroachdb"
# Create a tmp folder for files modified during the deployment # Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${CRDB_NAMESPACE}/manifests" TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${CRDB_NAMESPACE}/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER mkdir -p ${TMP_MANIFESTS_FOLDER}
function crdb_deploy_single() { function crdb_deploy_single() {
echo "CockroachDB Namespace" echo "CockroachDB Namespace"
...@@ -105,6 +105,13 @@ function crdb_deploy_single() { ...@@ -105,6 +105,13 @@ function crdb_deploy_single() {
sleep 1 sleep 1
done done
kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-0 kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-0
# Wait for CockroachDB to notify "start_node_query"
echo ">>> CockroachDB pods created. Waiting CockroachDB server to be started..."
while ! kubectl --namespace ${CRDB_NAMESPACE} logs pod/cockroachdb-0 -c cockroachdb 2>&1 | grep -q 'start_node_query'; do
printf "%c" "."
sleep 1
done
fi fi
echo echo
......
...@@ -13,17 +13,26 @@ ...@@ -13,17 +13,26 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
######################################################################################################################## ########################################################################################################################
# Read deployment settings # Read deployment settings
######################################################################################################################## ########################################################################################################################
# If not already set, set the namespace where Apache Kafka will be deployed. # If not already set, set the namespace where Kafka will be deployed.
export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
# If not already set, set the port Apache Kafka server will be exposed to. # If not already set, set the external port Kafka client interface will be exposed to.
export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"} export KFK_EXT_PORT_CLIENT=${KFK_EXT_PORT_CLIENT:-"9092"}
# If not already set, set Kafka installation mode. Accepted values are: 'single'.
# - If KFK_DEPLOY_MODE is "single", Kafka is deployed in single node mode. It is convenient for
# development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# NOTE: Production mode is still not supported. Will be provided in the future.
export KFK_DEPLOY_MODE=${KFK_DEPLOY_MODE:-"single"}
# If not already set, if flag is YES, Apache Kafka will be redeployed and all topics will be lost. # If not already set, disable flag for re-deploying Kafka from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
# If KFK_REDEPLOY is "YES", the message broker will be dropped while checking/deploying Kafka.
export KFK_REDEPLOY=${KFK_REDEPLOY:-""} export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
...@@ -34,58 +43,83 @@ export KFK_REDEPLOY=${KFK_REDEPLOY:-""} ...@@ -34,58 +43,83 @@ export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
# Constants # Constants
TMP_FOLDER="./tmp" TMP_FOLDER="./tmp"
KFK_MANIFESTS_PATH="manifests/kafka" KFK_MANIFESTS_PATH="manifests/kafka"
KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
KFK_MANIFEST="02-kafka.yaml"
# Create a tmp folder for files modified during the deployment # Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests" TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
mkdir -p ${TMP_MANIFESTS_FOLDER} mkdir -p ${TMP_MANIFESTS_FOLDER}
function kafka_deploy() { function kfk_deploy_single() {
# copy zookeeper and kafka manifest files to temporary manifest location echo "Kafka Namespace"
cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" echo ">>> Create Kafka Namespace (if missing)"
cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
# echo "Apache Kafka Namespace"
echo "Delete Apache Kafka Namespace"
kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
echo "Create Apache Kafka Namespace"
kubectl create namespace ${KFK_NAMESPACE} kubectl create namespace ${KFK_NAMESPACE}
echo
# echo ">>> Deplying Apache Kafka Zookeeper" echo "Kafka (single-mode)"
# Kafka zookeeper service should be deployed before the kafka service echo ">>> Checking if Kafka is deployed..."
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" if kubectl get --namespace ${KFK_NAMESPACE} statefulset/kafka &> /dev/null; then
echo ">>> Kafka is present; skipping step."
#KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically else
#KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') echo ">>> Deploy Kafka"
cp "${KFK_MANIFESTS_PATH}/single-node.yaml" "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml"
#sed -i "s/<KFK_NAMESPACE>/${KFK_NAMESPACE}/" "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml"
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml"
echo ">>> Waiting Kafka statefulset to be created..."
while ! kubectl get --namespace ${KFK_NAMESPACE} statefulset/kafka &> /dev/null; do
printf "%c" "."
sleep 1
done
# Wait for statefulset condition "Available=True" does not work
# Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
# "error: readyReplicas is not found"
# Workaround: Check the pods are ready
#echo ">>> Kafka statefulset created. Waiting for readiness condition..."
#kubectl wait --namespace ${KFK_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/kafka
    #kubectl wait --namespace ${KFK_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
# statefulset/kafka
echo ">>> Kafka statefulset created. Waiting Kafka pods to be created..."
while ! kubectl get --namespace ${KFK_NAMESPACE} pod/kafka-0 &> /dev/null; do
printf "%c" "."
sleep 1
done
kubectl wait --namespace ${KFK_NAMESPACE} --for=condition=Ready --timeout=300s pod/kafka-0
# Wait for Kafka to notify "Kafka Server started"
echo ">>> Kafka pods created. Waiting Kafka Server to be started..."
while ! kubectl --namespace ${KFK_NAMESPACE} logs pod/kafka-0 -c kafka 2>&1 | grep -q 'Kafka Server started'; do
printf "%c" "."
sleep 1
done
fi
echo
}
# Kafka service should be deployed after the zookeeper service
#sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
sed -i "s/<KAFKA_NAMESPACE>/${KFK_NAMESPACE}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
# echo ">>> Deploying Apache Kafka Broker" function kfk_undeploy_single() {
kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" echo "Kafka (single-mode)"
echo ">>> Checking if Kafka is deployed..."
if kubectl get --namespace ${KFK_NAMESPACE} statefulset/kafka &> /dev/null; then
echo ">>> Undeploy Kafka"
kubectl delete --namespace ${KFK_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml" --ignore-not-found
else
echo ">>> Kafka is not present; skipping step."
fi
echo
# echo ">>> Verifing Apache Kafka deployment" echo "Kafka Namespace"
sleep 5 echo ">>> Delete Kafka Namespace (if exists)"
# KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods) echo "NOTE: this step might take few minutes to complete!"
# if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
# echo "Deployment Error: \n $KFK_PODS_STATUS" echo
# else
# echo "$KFK_PODS_STATUS"
# fi
} }
echo ">>> Apache Kafka" if [ "$KFK_DEPLOY_MODE" == "single" ]; then
echo "Checking if Apache Kafka is deployed ... "
if [ "$KFK_REDEPLOY" == "YES" ]; then if [ "$KFK_REDEPLOY" == "YES" ]; then
echo "Redeploying kafka namespace" kfk_undeploy_single
kafka_deploy fi
elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then
echo "Apache Kafka already present; skipping step." kfk_deploy_single
else else
echo "Kafka namespace doesn't exists. Deploying kafka namespace" echo "Unsupported value: KFK_DEPLOY_MODE=$KFK_DEPLOY_MODE"
kafka_deploy
fi fi
echo
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
# limitations under the License. # limitations under the License.
set -euo pipefail set -euo pipefail
: "${KUBECONFIG:=/var/snap/microk8s/current/credentials/client.config}"
# ----------------------------------------------------------- # -----------------------------------------------------------
# Global namespace for all deployments # Global namespace for all deployments
...@@ -28,7 +30,7 @@ RELEASE_NAME_PROM="mon-prometheus" ...@@ -28,7 +30,7 @@ RELEASE_NAME_PROM="mon-prometheus"
CHART_REPO_NAME_PROM="prometheus-community" CHART_REPO_NAME_PROM="prometheus-community"
CHART_REPO_URL_PROM="https://prometheus-community.github.io/helm-charts" CHART_REPO_URL_PROM="https://prometheus-community.github.io/helm-charts"
CHART_NAME_PROM="prometheus" CHART_NAME_PROM="prometheus"
VALUES_FILE_PROM="$VALUES_FILE_PATH/prometheus_values.yaml" VALUES_FILE_PROM="$VALUES_FILE_PATH/prometheus_values.yaml" # Values file for Prometheus and gateway
# ----------------------------------------------------------- # -----------------------------------------------------------
# Mimir Configuration # Mimir Configuration
...@@ -76,7 +78,8 @@ deploy_chart() { ...@@ -76,7 +78,8 @@ deploy_chart() {
echo "Installing/Upgrading $release_name using custom values from $values_file..." echo "Installing/Upgrading $release_name using custom values from $values_file..."
helm upgrade --install "$release_name" "$chart_repo_name/$chart_name" \ helm upgrade --install "$release_name" "$chart_repo_name/$chart_name" \
--namespace "$namespace" \ --namespace "$namespace" \
--values "$values_file" --values "$values_file" \
--kubeconfig "$KUBECONFIG"
else else
echo "Installing/Upgrading $release_name with default chart values..." echo "Installing/Upgrading $release_name with default chart values..."
helm upgrade --install "$release_name" "$chart_repo_name/$chart_name" \ helm upgrade --install "$release_name" "$chart_repo_name/$chart_name" \
......
...@@ -51,12 +51,6 @@ export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} ...@@ -51,12 +51,6 @@ export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
# If not already set, set the namespace where CockroackDB will be deployed. # If not already set, set the namespace where CockroackDB will be deployed.
export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"} export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
# If not already set, set the external port CockroackDB Postgre SQL interface will be exposed to.
export CRDB_EXT_PORT_SQL=${CRDB_EXT_PORT_SQL:-"26257"}
# If not already set, set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to.
export CRDB_EXT_PORT_HTTP=${CRDB_EXT_PORT_HTTP:-"8081"}
# If not already set, set the database username to be used by Context. # If not already set, set the database username to be used by Context.
export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"} export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
...@@ -69,27 +63,12 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} ...@@ -69,27 +63,12 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
# If not already set, set the namespace where NATS will be deployed. # If not already set, set the namespace where NATS will be deployed.
export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
# If not already set, set the external port NATS Client interface will be exposed to.
export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
# If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
# ----- QuestDB ---------------------------------------------------------------- # ----- QuestDB ----------------------------------------------------------------
# If not already set, set the namespace where QuestDB will be deployed. # If not already set, set the namespace where QuestDB will be deployed.
export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"} export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
# If not already set, set the external port QuestDB Postgre SQL interface will be exposed to.
export QDB_EXT_PORT_SQL=${QDB_EXT_PORT_SQL:-"8812"}
# If not already set, set the external port QuestDB Influx Line Protocol interface will be exposed to.
export QDB_EXT_PORT_ILP=${QDB_EXT_PORT_ILP:-"9009"}
# If not already set, set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
export QDB_EXT_PORT_HTTP=${QDB_EXT_PORT_HTTP:-"9000"}
# If not already set, set the database username to be used for QuestDB. # If not already set, set the database username to be used for QuestDB.
export QDB_USERNAME=${QDB_USERNAME:-"admin"} export QDB_USERNAME=${QDB_USERNAME:-"admin"}
...@@ -114,14 +93,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} ...@@ -114,14 +93,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
# ----- Apache Kafka ------------------------------------------------------ # ----- Apache Kafka ------------------------------------------------------
# If not already set, set the namespace where Apache Kafka will be deployed. # If not already set, set the namespace where Kafka will be deployed.
export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"} export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
# If not already set, set the port Apache Kafka server will be exposed to.
export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"}
# If not already set, if flag is YES, Apache Kafka will be redeployed and topic will be lost.
export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
######################################################################################################################## ########################################################################################################################
# Automated steps start here # Automated steps start here
...@@ -154,7 +128,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type= ...@@ -154,7 +128,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type=
printf "\n" printf "\n"
echo ">>> Create Secret with Apache Kafka..." echo ">>> Create Secret with Apache Kafka..."
KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}') KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-public -o 'jsonpath={.spec.ports[0].port}')
kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \ --from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \
--from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT} --from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT}
......
...@@ -61,7 +61,7 @@ spec: ...@@ -61,7 +61,7 @@ spec:
containers: containers:
- name: cockroachdb - name: cockroachdb
image: cockroachdb/cockroach:latest-v22.2 image: cockroachdb/cockroach:latest-v22.2
imagePullPolicy: Always imagePullPolicy: IfNotPresent
args: args:
- start-single-node - start-single-node
ports: ports:
......
...@@ -55,9 +55,15 @@ spec: ...@@ -55,9 +55,15 @@ spec:
readinessProbe: readinessProbe:
exec: exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"] command: ["/bin/grpc_health_probe", "-addr=:1010"]
initialDelaySeconds: 50 # Context's gunicorn takes 30~40 seconds to bootstrap
periodSeconds: 10
failureThreshold: 10
livenessProbe: livenessProbe:
exec: exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"] command: ["/bin/grpc_health_probe", "-addr=:1010"]
initialDelaySeconds: 50 # Context's gunicorn takes 30~40 seconds to bootstrap
periodSeconds: 10
failureThreshold: 10
resources: resources:
requests: requests:
cpu: 250m cpu: 250m
......
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Zookeeper for the legacy (Zookeeper-based) Kafka deployment.
# Cluster-internal Service exposing the standard Zookeeper client port (2181).
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-service
  name: zookeeper-service
spec:
  type: ClusterIP
  ports:
    - name: zookeeper-port
      port: 2181
      #nodePort: 30181
      #targetPort: 2181
  selector:
    app: zookeeper
---
# Single-replica Zookeeper Deployment; no persistent storage is declared, so
# coordination state is lost if the pod is rescheduled (dev/test use only).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: zookeeper
  name: zookeeper
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      containers:
        - image: wurstmeister/zookeeper
          imagePullPolicy: IfNotPresent
          name: zookeeper
          ports:
            - containerPort: 2181
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Legacy (Zookeeper-based) Kafka broker.
# NOTE: the <KAFKA_NAMESPACE> placeholders below are template tokens meant to be
# substituted by the deploy script (sed) before applying this manifest.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: kafka-broker
  name: kafka-service
spec:
  ports:
    - port: 9092
  selector:
    app: kafka-broker
---
# Single-replica broker Deployment; no persistent storage is declared, so
# topics/messages are lost if the pod is rescheduled (dev/test use only).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: kafka-broker
  name: kafka-broker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-broker
  template:
    metadata:
      labels:
        app: kafka-broker
    spec:
      hostname: kafka-broker
      containers:
        - env:
            - name: KAFKA_BROKER_ID
              value: "1"
            - name: KAFKA_ZOOKEEPER_CONNECT
              #value: <ZOOKEEPER_INTERNAL_IP>:2181
              # Points at the zookeeper-service defined in the companion manifest.
              value: zookeeper-service.<KAFKA_NAMESPACE>.svc.cluster.local:2181
            - name: KAFKA_LISTENERS
              value: PLAINTEXT://:9092
            - name: KAFKA_ADVERTISED_LISTENERS
              # Advertised via the in-cluster Service DNS name so clients inside
              # the cluster can resolve the broker.
              value: PLAINTEXT://kafka-service.<KAFKA_NAMESPACE>.svc.cluster.local:9092
          image: wurstmeister/kafka
          imagePullPolicy: IfNotPresent
          name: kafka-broker
          ports:
            - containerPort: 9092
# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Single-node Kafka (KRaft mode: combined controller+broker, no Zookeeper).
# Cluster-internal Service fronting the StatefulSet below; the deploy script
# reads the first port of this Service to build the kfk-kpi-data secret.
apiVersion: v1
kind: Service
metadata:
  name: kafka-public
  labels:
    app.kubernetes.io/component: message-broker
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/name: kafka
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/component: message-broker
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/name: kafka
  ports:
    - name: clients        # client traffic (PLAINTEXT listener)
      port: 9092
      protocol: TCP
      targetPort: 9092
    - name: control-plane  # KRaft controller quorum listener
      port: 9093
      protocol: TCP
      targetPort: 9093
    - name: external       # EXTERNAL listener (advertised as localhost:9094)
      port: 9094
      protocol: TCP
      targetPort: 9094
---
# Single-replica StatefulSet; no volumeClaimTemplates are declared, so broker
# data is not persisted across pod restarts (dev/test use only).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
spec:
  selector:
    matchLabels:
      app.kubernetes.io/component: message-broker
      app.kubernetes.io/instance: kafka
      app.kubernetes.io/name: kafka
  serviceName: "kafka-public"
  replicas: 1
  minReadySeconds: 5
  template:
    metadata:
      labels:
        app.kubernetes.io/component: message-broker
        app.kubernetes.io/instance: kafka
        app.kubernetes.io/name: kafka
    spec:
      terminationGracePeriodSeconds: 10
      restartPolicy: Always
      containers:
        - name: kafka
          image: bitnami/kafka:latest
          imagePullPolicy: IfNotPresent
          ports:
            - name: clients
              containerPort: 9092
            - name: control-plane
              containerPort: 9093
            - name: external
              containerPort: 9094
          env:
            - name: KAFKA_CFG_NODE_ID
              value: "1"
            - name: KAFKA_CFG_PROCESS_ROLES
              # Combined mode: this node is both KRaft controller and broker.
              value: "controller,broker"
            - name: KAFKA_CFG_LISTENERS
              value: "PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094"
            - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
              # All listeners unauthenticated/unencrypted — dev/test only.
              value: "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT"
            - name: KAFKA_CFG_ADVERTISED_LISTENERS
              # NOTE(review): the Service DNS name hardcodes the "kafka"
              # namespace; if deployed with KFK_NAMESPACE != "kafka" this
              # advertised address will not resolve — confirm.
              value: "PLAINTEXT://kafka-public.kafka.svc.cluster.local:9092,EXTERNAL://localhost:9094"
            - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES
              value: "CONTROLLER"
            - name: KAFKA_CFG_CONTROLLER_QUORUM_VOTERS
              # Single-node quorum: node id 1 at the pod's stable DNS name.
              value: "1@kafka-0:9093"
          resources:
            requests:
              cpu: "250m"
              memory: 1Gi
            limits:
              cpu: "1"
              memory: 2Gi
...@@ -41,7 +41,7 @@ spec: ...@@ -41,7 +41,7 @@ spec:
- name: LOG_LEVEL - name: LOG_LEVEL
value: "INFO" value: "INFO"
- name: FLASK_ENV - name: FLASK_ENV
value: "production" # change to "development" if developing value: "production" # normal value is "production", change to "development" if developing
- name: IETF_NETWORK_RENDERER - name: IETF_NETWORK_RENDERER
value: "LIBYANG" value: "LIBYANG"
envFrom: envFrom:
......
...@@ -20,13 +20,15 @@ ...@@ -20,13 +20,15 @@
export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy. # Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # export TFS_COMPONENTS="context device pathcomp service slice nbi webui"
export TFS_COMPONENTS="context device pathcomp service webui"
# Uncomment to activate Monitoring (old) # Uncomment to activate Monitoring (old)
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate Monitoring Framework (new) # Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" #export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager telemetry"
# Uncomment to activate QoS Profiles # Uncomment to activate QoS Profiles
#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" #export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
...@@ -134,7 +136,7 @@ export CRDB_PASSWORD="tfs123" ...@@ -134,7 +136,7 @@ export CRDB_PASSWORD="tfs123"
export CRDB_DEPLOY_MODE="single" export CRDB_DEPLOY_MODE="single"
# Disable flag for dropping database, if it exists. # Disable flag for dropping database, if it exists.
export CRDB_DROP_DATABASE_IF_EXISTS="" export CRDB_DROP_DATABASE_IF_EXISTS="YES"
# Disable flag for re-deploying CockroachDB from scratch. # Disable flag for re-deploying CockroachDB from scratch.
export CRDB_REDEPLOY="" export CRDB_REDEPLOY=""
...@@ -159,6 +161,22 @@ export NATS_DEPLOY_MODE="single" ...@@ -159,6 +161,22 @@ export NATS_DEPLOY_MODE="single"
export NATS_REDEPLOY="" export NATS_REDEPLOY=""
# ----- Apache Kafka -----------------------------------------------------------
# Set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE="kafka"
# Set the port Apache Kafka server will be exposed to.
export KFK_EXT_PORT_CLIENT="9092"
# Set Kafka installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/kafka.sh for additional details
export KFK_DEPLOY_MODE="single"
# Disable flag for re-deploying Kafka from scratch.
export KFK_REDEPLOY=""
# ----- QuestDB ---------------------------------------------------------------- # ----- QuestDB ----------------------------------------------------------------
# Set the namespace where QuestDB will be deployed. # Set the namespace where QuestDB will be deployed.
...@@ -199,15 +217,3 @@ export PROM_EXT_PORT_HTTP="9090" ...@@ -199,15 +217,3 @@ export PROM_EXT_PORT_HTTP="9090"
# Set the external port Grafana HTTP Dashboards will be exposed to. # Set the external port Grafana HTTP Dashboards will be exposed to.
export GRAF_EXT_PORT_HTTP="3000" export GRAF_EXT_PORT_HTTP="3000"
# ----- Apache Kafka -----------------------------------------------------------
# Set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE="kafka"
# Set the port Apache Kafka server will be exposed to.
export KFK_SERVER_PORT="9092"
# Set the flag to YES for redeploying of Apache Kafka
export KFK_REDEPLOY=""
...@@ -19,7 +19,7 @@ PROJECTDIR=`pwd` ...@@ -19,7 +19,7 @@ PROJECTDIR=`pwd`
cd $PROJECTDIR/src cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc RCFILE=$PROJECTDIR/coverage/.coveragerc
CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}') # CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" # export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
kpi_value_writer/tests/test_metric_writer_to_prom.py kpi_value_writer/tests/test_metric_writer_to_prom.py
...@@ -21,7 +21,7 @@ docker container prune -f ...@@ -21,7 +21,7 @@ docker container prune -f
docker pull "bitnami/kafka:latest" docker pull "bitnami/kafka:latest"
docker buildx build -t "mock_tfs_nbi_dependencies:test" -f ./src/tests/tools/mock_tfs_nbi_dependencies/Dockerfile . docker buildx build -t "mock_tfs_nbi_dependencies:test" -f ./src/tests/tools/mock_tfs_nbi_dependencies/Dockerfile .
docker buildx build -t "nbi:latest" -f ./src/nbi/Dockerfile . docker buildx build -t "nbi:latest" -f ./src/nbi/Dockerfile .
docker images --filter="dangling=true" --quiet | xargs -r docker rmi docker image prune --force
docker network create -d bridge teraflowbridge docker network create -d bridge teraflowbridge
......
...@@ -37,13 +37,13 @@ echo ...@@ -37,13 +37,13 @@ echo
echo "Build optical attack detector:" echo "Build optical attack detector:"
echo "------------------------------" echo "------------------------------"
docker build -t "opticalattackdetector:latest" -f ./src/opticalattackdetector/Dockerfile . docker build -t "opticalattackdetector:latest" -f ./src/opticalattackdetector/Dockerfile .
docker images --filter="dangling=true" --quiet | xargs -r docker rmi docker image prune --force
echo echo
echo "Build dbscan serving:" echo "Build dbscan serving:"
echo "---------------------" echo "---------------------"
docker build -t "dbscanserving:latest" -f ./src/dbscanserving/Dockerfile . docker build -t "dbscanserving:latest" -f ./src/dbscanserving/Dockerfile .
docker images --filter="dangling=true" --quiet | xargs -r docker rmi docker image prune --force
echo echo
echo "Create test environment:" echo "Create test environment:"
......
#!/bin/bash
# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # Cleanup
# docker rm --force qkd-node
# docker network rm --force qkd-node-br
# # Create Docker network
# docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br
# <<<<<<<< HEAD:scripts/run_tests_locally-telemetry-gnmi.sh
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
# RCFILE=$PROJECTDIR/coverage/.coveragerc
export KFK_SERVER_ADDRESS='127.0.0.1:9094'
# CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
# export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_telemetry?sslmode=require"
RCFILE=$PROJECTDIR/coverage/.coveragerc
# this is unit test (should be tested with container-lab running)
# python3 -m pytest --log-level=info --log-cli-level=info --verbose \
# telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py
# this is integration test (should be tested with container-lab running)
python3 -m pytest --log-level=info --log-cli-level=info --verbose \
telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py # this is integration test
# ========
# # Create QKD Node
# docker run --detach --name qkd-node --network qkd-node-br --ip 172.254.250.101 mock-qkd-node:test
# # Dump QKD Node Docker containers
# docker ps -a
# echo "Bye!"
# >>>>>>>> develop:src/tests/tools/mock_qkd_node/run.sh
...@@ -19,6 +19,7 @@ build analytics: ...@@ -19,6 +19,7 @@ build analytics:
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: build stage: build
before_script: before_script:
- docker image prune --force
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script: script:
# This first build tags the builder resulting image to prevent being removed by dangling image removal command # This first build tags the builder resulting image to prevent being removed by dangling image removal command
...@@ -30,7 +31,7 @@ build analytics: ...@@ -30,7 +31,7 @@ build analytics:
- docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
after_script: after_script:
- docker images --filter="dangling=true" --quiet | xargs -r docker rmi - docker image prune --force
rules: rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
......
...@@ -19,13 +19,14 @@ build automation: ...@@ -19,13 +19,14 @@ build automation:
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: build stage: build
before_script: before_script:
- docker image prune --force
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script: script:
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile . - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script: after_script:
- docker images --filter="dangling=true" --quiet | xargs -r docker rmi - docker image prune --force
rules: rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
......