Compare revisions: tfs/controller
Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (116)
Showing with 421 additions and 100 deletions
......@@ -50,5 +50,7 @@ include:
- local: '/src/kpi_value_api/.gitlab-ci.yml'
- local: '/src/kpi_value_writer/.gitlab-ci.yml'
- local: '/src/telemetry/.gitlab-ci.yml'
- local: '/src/analytics/.gitlab-ci.yml'
# This should be the last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml'
......@@ -207,24 +207,24 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
########################################################################################################################
# Deploy CockroachDB
#./deploy/crdb.sh
./deploy/crdb.sh
# Deploy NATS
#./deploy/nats.sh
./deploy/nats.sh
# Deploy QuestDB
#./deploy/qdb.sh
./deploy/qdb.sh
# Deploy Apache Kafka
#./deploy/kafka.sh
./deploy/kafka.sh
# Expose Dashboard
#./deploy/expose_dashboard.sh
./deploy/expose_dashboard.sh
# Deploy TeraFlowSDN
./deploy/tfs.sh
# Show deploy summary
#./deploy/show.sh
./deploy/show.sh
echo "Done!"
......@@ -47,10 +47,10 @@ function kafka_deploy() {
cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
# echo "Apache Kafka Namespace"
echo ">>> Delete Apache Kafka Namespace"
echo "Delete Apache Kafka Namespace"
kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
echo ">>> Create Apache Kafka Namespace"
echo "Create Apache Kafka Namespace"
kubectl create namespace ${KFK_NAMESPACE}
# echo ">>> Deplying Apache Kafka Zookeeper"
......@@ -76,15 +76,15 @@ function kafka_deploy() {
# fi
}
echo "Apache Kafka"
echo ">>> Checking if Apache Kafka is deployed ... "
echo ">>> Apache Kafka"
echo "Checking if Apache Kafka is deployed ... "
if [ "$KFK_REDEPLOY" == "YES" ]; then
echo ">>> Redeploying kafka namespace"
echo "Redeploying kafka namespace"
kafka_deploy
elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then
echo ">>> Apache Kafka already present; skipping step."
echo "Apache Kafka already present; skipping step."
else
echo ">>> Kafka namespace doesn't exists. Deploying kafka namespace"
echo "Kafka namespace doesn't exists. Deploying kafka namespace"
kafka_deploy
fi
echo
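To force a clean Kafka redeployment through the logic above, the redeploy flag can be set before running the script; a minimal sketch, assuming it is invoked from the repository root as in deploy/all.sh:

    export KFK_REDEPLOY="YES"
    ./deploy/kafka.sh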
......@@ -141,60 +141,22 @@ TMP_LOGS_FOLDER="${TMP_FOLDER}/${TFS_K8S_NAMESPACE}/logs"
mkdir -p $TMP_LOGS_FOLDER
echo "Deleting and Creating a new namespace..."
#kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
#kubectl create namespace $TFS_K8S_NAMESPACE
kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
kubectl create namespace $TFS_K8S_NAMESPACE
sleep 2
printf "\n"
echo "Create secret with CockroachDB data"
echo ">>> Create Secret with CockroachDB data..."
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_CONTEXT=${CRDB_DATABASE} # TODO: replace with a specific configurable environment variable
kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_CONTEXT} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
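# Note (illustrative, not part of this change): this single crdb-data secret is
# consumed via envFrom by the component Deployments, each of which now sets its own
# CRDB_DATABASE (e.g. tfs_context, tfs_kpi_mgmt, tfs_telemetry, tfs_analytics) in its
# manifest. A quick way to verify the secret exists, assuming kubectl access:
#   kubectl --namespace ${TFS_K8S_NAMESPACE} get secret crdb-data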
printf "\n"
echo "Create secret with CockroachDB data for KPI Management microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt" # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_KPI_MGMT} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for Telemetry microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_TELEMETRY="tfs_telemetry" # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for Analytics microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_ANALYTICS="tfs_analytics" # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-analytics --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_ANALYTICS} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with Apache Kafka data for KPI, Telemetry and Analytics microservices"
echo ">>> Create Secret with Apache Kakfa..."
KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}')
kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \
......@@ -252,7 +214,7 @@ echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
echo "Create Redis secret..."
# first try to delete an old one if exists
#kubectl delete secret redis-secrets --namespace=$TFS_K8S_NAMESPACE --ignore-not-found
kubectl delete secret redis-secrets --namespace=$TFS_K8S_NAMESPACE --ignore-not-found
REDIS_PASSWORD=`uuidgen`
kubectl create secret generic redis-secrets --namespace=$TFS_K8S_NAMESPACE \
--from-literal=REDIS_PASSWORD=$REDIS_PASSWORD
......@@ -669,6 +631,10 @@ if [[ "$TFS_COMPONENTS" == *"monitoring"* ]] && [[ "$TFS_COMPONENTS" == *"webui"
printf "\n\n"
fi
echo "Pruning Docker Images..."
docker image prune --force
printf "\n\n"
if [ "$DOCKER_BUILD" == "docker buildx build" ]; then
echo "Pruning Docker Buildx Cache..."
docker buildx prune --force
......
......@@ -37,9 +37,13 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "tfs_analytics"
- name: METRICS_PORT
value: "9192"
envFrom:
- secretRef:
name: crdb-analytics
name: crdb-data
- secretRef:
name: kfk-kpi-data
readinessProbe:
......@@ -60,10 +64,12 @@ spec:
imagePullPolicy: Always
ports:
- containerPort: 30090
- containerPort: 9192
- containerPort: 9193
env:
- name: LOG_LEVEL
value: "INFO"
- name: METRICS_PORT
value: "9193"
envFrom:
- secretRef:
name: kfk-kpi-data
......@@ -100,10 +106,14 @@ spec:
protocol: TCP
port: 30090
targetPort: 30090
- name: metrics
- name: metrics-frontend
protocol: TCP
port: 9192
targetPort: 9192
- name: metrics-backend
protocol: TCP
port: 9193
targetPort: 9193
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......
......@@ -45,6 +45,8 @@ spec:
value: "FALSE"
- name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
value: "FALSE"
- name: CRDB_DATABASE
value: "tfs_context"
envFrom:
- secretRef:
name: crdb-data
......
......@@ -39,9 +39,11 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "tfs_kpi_mgmt"
envFrom:
- secretRef:
name: crdb-kpi-data
name: crdb-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30010"]
......
......@@ -38,6 +38,8 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
- name: IETF_NETWORK_RENDERER
value: "LIBYANG"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:9090"]
......
......@@ -62,3 +62,10 @@ spec:
name: nbiservice
port:
number: 8080
- path: /()(qkd_app/.*)
pathType: Prefix
backend:
service:
name: qkd-appservice
port:
number: 8005
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: qkd-appservice
spec:
selector:
matchLabels:
app: qkd-appservice
#replicas: 1
template:
metadata:
labels:
app: qkd-appservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/qkd_app:latest
imagePullPolicy: Always
ports:
- containerPort: 10060
- containerPort: 9192
- containerPort: 8005
env:
- name: LOG_LEVEL
value: "DEBUG"
- name: CRDB_DATABASE_APP
value: "qkd_app"
envFrom:
- secretRef:
name: crdb-data
- secretRef:
name: nats-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10060"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10060"]
resources:
requests:
cpu: 150m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
---
apiVersion: v1
kind: Service
metadata:
name: qkd-appservice
labels:
app: qkd-appservice
spec:
type: ClusterIP
selector:
app: qkd-appservice
ports:
- name: grpc
protocol: TCP
port: 10060
targetPort: 10060
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
- name: http
port: 8005
targetPort: 8005
......@@ -475,3 +475,156 @@ spec:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-analyticsservice-metric
labels:
app: analyticsservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
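# (Illustrative hint, not part of this change: the release labels applied to
# existing ServiceMonitors can be listed, assuming kubectl access to the
# monitoring namespace, with:
#   kubectl get servicemonitors --namespace monitoring --show-labels )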
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: analyticsservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics-frontend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
- port: metrics-backend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-telemetryservice-metric
labels:
app: telemetryservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: telemetryservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics-frontend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
- port: metrics-backend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-kpi-managerservice-metric
labels:
app: kpi-managerservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: kpi-managerservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-kpi_value_apiservice-metric
labels:
app: kpi_value_apiservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: kpi_value_apiservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-kpi_value_writerservice-metric
labels:
app: kpi_value_writerservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: kpi_value_writerservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
......@@ -37,9 +37,13 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "tfs_telemetry"
- name: METRICS_PORT
value: "9192"
envFrom:
- secretRef:
name: crdb-telemetry
name: crdb-data
- secretRef:
name: kfk-kpi-data
readinessProbe:
......@@ -60,10 +64,12 @@ spec:
imagePullPolicy: Always
ports:
- containerPort: 30060
- containerPort: 9192
- containerPort: 9193
env:
- name: LOG_LEVEL
value: "INFO"
- name: METRICS_PORT
value: "9193"
envFrom:
- secretRef:
name: kfk-kpi-data
......@@ -100,10 +106,14 @@ spec:
protocol: TCP
port: 30060
targetPort: 30060
- name: metrics
- name: metrics-frontend
protocol: TCP
port: 9192
targetPort: 9192
- name: metrics-backend
protocol: TCP
port: 9193
targetPort: 9193
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......
......@@ -20,13 +20,13 @@
export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="analytics"
export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator automation"
# Uncomment to activate Monitoring (old)
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics"
export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics"
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
......@@ -42,10 +42,10 @@ export TFS_COMPONENTS="analytics"
#fi
# Uncomment to activate ZTP
#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
# Uncomment to activate Policy Manager
#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
# Uncomment to activate Optical CyberSecurity
#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
......@@ -71,11 +71,18 @@ export TFS_COMPONENTS="analytics"
#fi
# Uncomment to activate QKD App
#export TFS_COMPONENTS="${TFS_COMPONENTS} app"
# To manage QKD Apps, "service" requires "qkd_app" to be deployed
# before it, so we "hack" the TFS_COMPONENTS environment variable, prepending
# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it (see the illustrative note below).
#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
# BEFORE="${TFS_COMPONENTS% service*}"
# AFTER="${TFS_COMPONENTS#* service}"
# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
#fi
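# Illustrative effect (not part of this change): with "service" present in
# TFS_COMPONENTS, e.g. "... pathcomp service slice ...", the block above would
# re-export the variable with "qkd_app" inserted right before "service",
# e.g. "... pathcomp qkd_app service slice ...".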
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="panos31"
export TFS_IMAGE_TAG="dev"
# Set the name of the Kubernetes namespace to deploy TFS to.
export TFS_K8S_NAMESPACE="tfs"
......@@ -124,7 +131,7 @@ export CRDB_DEPLOY_MODE="single"
export CRDB_DROP_DATABASE_IF_EXISTS=""
# Disable flag for re-deploying CockroachDB from scratch.
export CRDB_REDEPLOY="YES"
export CRDB_REDEPLOY=""
# ----- NATS -------------------------------------------------------------------
......@@ -143,7 +150,7 @@ export NATS_EXT_PORT_HTTP="8222"
export NATS_DEPLOY_MODE="single"
# Disable flag for re-deploying NATS from scratch.
export NATS_REDEPLOY="YES"
export NATS_REDEPLOY=""
# ----- QuestDB ----------------------------------------------------------------
......@@ -176,7 +183,7 @@ export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
export QDB_DROP_TABLES_IF_EXIST=""
# Disable flag for re-deploying QuestDB from scratch.
export QDB_REDEPLOY="YES"
export QDB_REDEPLOY=""
# ----- K8s Observability ------------------------------------------------------
......
syntax = "proto3";
package qkd_app;
import "context.proto";
// Optare: Change this if you want to change App's structure or enums.
// Optare: If a message (structure) is changed it must be changed in src/app/service/database
enum QKDAppStatusEnum {
QKDAPPSTATUS_ON = 0;
QKDAPPSTATUS_DISCONNECTED = 1;
QKDAPPSTATUS_OUT_OF_TIME = 2;
QKDAPPSTATUS_ZOMBIE = 3;
}
enum QKDAppTypesEnum {
QKDAPPTYPES_INTERNAL = 0;
QKDAPPTYPES_CLIENT = 1;
}
message QKDLId {
context.Uuid qkdl_uuid = 1;
}
message App {
AppId app_id = 1;
QKDAppStatusEnum app_status = 2;
QKDAppTypesEnum app_type = 3;
string server_app_id = 4;
repeated string client_app_id = 5;
repeated QKDLId backing_qkdl_id = 6;
context.DeviceId local_device_id = 7;
context.DeviceId remote_device_id = 8;
}
message AppId {
context.ContextId context_id = 1;
context.Uuid app_uuid = 2;
}
service AppService {
rpc RegisterApp(App) returns (context.Empty) {}
rpc ListApps (context.ContextId ) returns ( AppList ) {}
}
message AppList {
repeated App apps = 1;
}
......@@ -24,7 +24,7 @@ cd $PROJECTDIR/src
# python3 kpi_manager/tests/test_unitary.py
RCFILE=$PROJECTDIR/coverage/.coveragerc
CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}')
CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
kpi_manager/tests/test_kpi_db.py
......@@ -20,9 +20,8 @@ cd $PROJECTDIR/src
# RCFILE=$PROJECTDIR/coverage/.coveragerc
# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
# kpi_manager/tests/test_unitary.py
CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs-telemetry?sslmode=require"
RCFILE=$PROJECTDIR/coverage/.coveragerc
python3 -m pytest --log-level=DEBUG --log-cli-level=debug --verbose \
telemetry/tests/test_telemetryDB.py
......@@ -19,11 +19,9 @@ PROJECTDIR=`pwd`
cd $PROJECTDIR/src
# RCFILE=$PROJECTDIR/coverage/.coveragerc
# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
# kpi_manager/tests/test_unitary.py
# python3 kpi_manager/tests/test_unitary.py
CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
RCFILE=$PROJECTDIR/coverage/.coveragerc
python3 -m pytest --log-level=INFO --log-cli-level=debug --verbose \
telemetry/backend/tests/test_backend.py
telemetry/backend/tests/test_TelemetryBackend.py
......@@ -17,13 +17,10 @@
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
# RCFILE=$PROJECTDIR/coverage/.coveragerc
# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
# kpi_manager/tests/test_unitary.py
# python3 kpi_manager/tests/test_unitary.py
CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
RCFILE=$PROJECTDIR/coverage/.coveragerc
python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
telemetry/frontend/tests/test_frontend.py
#!/bin/bash
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/qkd-appservice -c server
......@@ -60,6 +60,7 @@ unit_test analytics-backend:
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
- if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
# - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi
- if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend container is not in the system"; fi
- docker container prune -f
script:
......@@ -68,6 +69,7 @@ unit_test analytics-backend:
- docker pull "bitnami/kafka:latest"
- >
docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
--env ALLOW_ANONYMOUS_LOGIN=yes
bitnami/zookeeper:latest
- sleep 10 # Wait for Zookeeper to start
- >
......@@ -75,7 +77,7 @@ unit_test analytics-backend:
--env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
--env ALLOW_PLAINTEXT_LISTENER=yes
bitnami/kafka:latest
- sleep 10 # Wait for Kafka to start
- sleep 20 # Wait for Kafka to start
- KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $KAFKA_IP
- >
......@@ -93,12 +95,12 @@ unit_test analytics-backend:
- docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f ${IMAGE_NAME}-backend
- docker rm -f kafka
- docker rm -f zookeeper
- docker network rm teraflowbridge
- docker volume prune --force
- docker image prune --force
- docker rm -f ${IMAGE_NAME}-backend
- docker rm -f zookeeper
- docker rm -f kafka
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
......@@ -150,19 +152,20 @@ unit_test analytics-frontend:
- CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $CRDB_ADDRESS
- >
docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 \
-e ALLOW_ANONYMOUS_LOGIN=yes \
docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
--env ALLOW_ANONYMOUS_LOGIN=yes
bitnami/zookeeper:latest
- sleep 10 # Wait for Zookeeper to start
- docker run --name kafka -d --network=teraflowbridge -p 9092:9092
- >
docker run --name kafka -d --network=teraflowbridge -p 9092:9092
--env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
--env ALLOW_PLAINTEXT_LISTENER=yes
bitnami/kafka:latest
- sleep 10 # Wait for Kafka to start
- sleep 20 # Wait for Kafka to start
- KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $KAFKA_IP
# - docker logs zookeeper
# - docker logs kafka
- docker logs zookeeper
- docker logs kafka
- >
docker run --name $IMAGE_NAME-frontend -d -p 30050:30050
--env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
......@@ -179,13 +182,13 @@ unit_test analytics-frontend:
- docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker volume rm -f crdb
- docker network rm teraflowbridge
- docker volume prune --force
- docker image prune --force
- docker rm -f ${IMAGE_NAME}-frontend
- docker rm -f zookeeper
- docker rm -f kafka
- docker volume rm -f crdb
- docker volume prune --force
- docker image prune --force
- docker network rm teraflowbridge
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
......@@ -200,4 +203,4 @@ unit_test analytics-frontend:
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
\ No newline at end of file
junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml