@@ -17,29 +17,34 @@ stages:
  #- dependencies
  - build
  - unit_test
  - end2end_test

# include the individual .gitlab-ci.yml of each micro-service and tests
include:
  #- local: '/manifests/.gitlab-ci.yml'
  - local: '/src/monitoring/.gitlab-ci.yml'
  - local: '/src/nbi/.gitlab-ci.yml'
  - local: '/src/context/.gitlab-ci.yml'
  - local: '/src/device/.gitlab-ci.yml'
  - local: '/src/service/.gitlab-ci.yml'
  - local: '/src/dbscanserving/.gitlab-ci.yml'
  - local: '/src/opticalattackmitigator/.gitlab-ci.yml'
  - local: '/src/opticalattackdetector/.gitlab-ci.yml'
  - local: '/src/opticalattackmanager/.gitlab-ci.yml'
  - local: '/src/opticalcontroller/.gitlab-ci.yml'
  - local: '/src/ztp/.gitlab-ci.yml'
  - local: '/src/policy/.gitlab-ci.yml'
  - local: '/src/forecaster/.gitlab-ci.yml'
  #- local: '/src/webui/.gitlab-ci.yml'
  #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
  #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
  #- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
  - local: '/src/slice/.gitlab-ci.yml'
  #- local: '/src/interdomain/.gitlab-ci.yml'
  - local: '/src/pathcomp/.gitlab-ci.yml'
  #- local: '/src/dlt/.gitlab-ci.yml'
  - local: '/src/load_generator/.gitlab-ci.yml'
  - local: '/src/bgpls_speaker/.gitlab-ci.yml'
  # This should be last one: end-to-end integration tests
  - local: '/src/tests/.gitlab-ci.yml'
@@ -40,7 +40,7 @@ components deployed.
## Impacted Components
List of impacted components: Context, Device, Service, PathComp, Slice, Monitoring, ZTP, Policy, NBI, etc.
This is just an enumeration; the impact on each component is elaborated below.
## Component1 Impact
...
# ETSI TeraFlowSDN Controller

The [ETSI Software Development Group TeraFlowSDN (SDG TFS)](https://tfs.etsi.org/) is developing an open source cloud native SDN controller enabling smart connectivity services for future networks beyond 5G.

The project originated from the "[Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows", funded by the European Union's Horizon 2020 Research and Innovation programme, which finished on 30th June 2023.

## Available branches and releases

[![Latest Release](https://labs.etsi.org/rep/tfs/controller/-/badges/release.svg)](https://labs.etsi.org/rep/tfs/controller/-/releases)

- The `master` branch ([![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/master/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/master/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master)) always points to the latest stable version of the TeraFlowSDN controller.
- The `release/X.Y.Z` branches point to the code of the release version indicated in the branch name.
  - Code in these branches can be considered stable, and no new features are planned.
  - In case of bugs, point releases increasing the revision number (Z) might be created.
- The `develop` branch ([![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/develop/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/develop/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop)) is the main development branch and contains the latest contributions.
  - **Use it with care! It might not be stable.**
  - The latest developments and contributions are added to this branch for testing and validation before they reach a release.
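For example, to work with these branches locally (a minimal sketch using standard git commands; the repository URL is taken from the badge links above and may differ if you use a fork or mirror):

```bash
# Clone the TeraFlowSDN controller repository
git clone https://labs.etsi.org/rep/tfs/controller.git
cd controller

# List the branches published by the project (master, develop, release/X.Y.Z, ...)
git branch --remotes

# Switch to the stable branch, or to the development branch for the latest contributions
git checkout master
git checkout develop   # use with care: might not be stable
```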
## Documentation

The [TeraFlowSDN Wiki](https://labs.etsi.org/rep/tfs/controller/-/wikis/home) pages include the main documentation for the ETSI TeraFlowSDN Controller.
They cover project documentation, installation instructions, functional tests, supported NBIs and SBIs, etc.
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
# By default, only basic components are deployed
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}

# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
...
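As an illustration of how these variables are typically consumed (a sketch based on the usual TFS workflow where the settings live in a `my_deploy.sh` file sourced before deployment; the `./deploy/all.sh` entry point is an assumption and may differ in your checkout):

```bash
# Pick the components to build/deploy, load the deployment settings, and launch the deployment
export TFS_COMPONENTS="context device ztp monitoring pathcomp service slice nbi webui load_generator"
source my_deploy.sh
./deploy/all.sh
```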
@@ -40,6 +40,21 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
# Automated steps start here
########################################################################################################################

DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
    # If Docker version >= 23, build command was migrated to docker-buildx
    # In Ubuntu, in practice, means to install package docker-buildx together with docker.io
    # Check if docker-buildx plugin is installed
    docker buildx version 1>/dev/null 2>/dev/null
    if [[ $? -ne 0 ]]; then
        echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
        echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
        exit 1;
    fi
    DOCKER_BUILD="docker buildx build"
fi
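# NOTE (illustrative, not part of the original script): on Ubuntu systems that install
# Docker from the docker.io APT package, the buildx plugin checked above can usually be
# added with:
#   sudo apt-get install docker-buildx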
# Constants
GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
TMP_FOLDER="./tmp"
@@ -59,18 +74,18 @@ for COMPONENT in $TFS_COMPONENTS; do
    echo " Building Docker image..."
    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"

    if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
        $DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
    elif [ "$COMPONENT" == "pathcomp" ]; then
        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
        $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
        $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
        # next command is redundant, but helpful to keep cache updated between rebuilds
        $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
    else
        $DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
    fi

    if [ -n "$TFS_REGISTRY_IMAGES" ]; then
...
@@ -158,9 +158,19 @@ function crdb_undeploy_single() {
function crdb_drop_database_single() {
    echo "Drop database if exists"
    if [[ -z "${GITLAB_CI}" ]]; then
        #kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o yaml
        CRDB_HOST=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
        CRDB_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
    else
        CRDB_HOST="127.0.0.1"
        CRDB_PORT=${CRDB_EXT_PORT_SQL}
    fi
    CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@${CRDB_HOST}:${CRDB_PORT}/defaultdb?sslmode=require"
    echo "CRDB_CLIENT_URL=${CRDB_CLIENT_URL}"
    kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
        ./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
        --execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
    echo
@@ -343,7 +353,7 @@ function crdb_undeploy_cluster() {
function crdb_drop_database_cluster() {
    echo "Drop database if exists"
    kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
        ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \
        "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
    echo
...
@@ -38,28 +38,44 @@ GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
TMP_FOLDER="./tmp"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
TMP_LOGS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/logs"
mkdir -p $TMP_LOGS_FOLDER

echo "Deleting and Creating a new namespace..."
kubectl delete namespace $K8S_NAMESPACE --ignore-not-found
kubectl create namespace $K8S_NAMESPACE
printf "\n"

echo "Deploying components and collecting environment variables..."
ENV_VARS_SCRIPT=tfs_bchain_runtime_env_vars.sh
echo "# Environment variables for TeraFlowSDN Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
PYTHONPATH=$(pwd)/src
echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT

echo "Processing '$COMPONENT' component..."
IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
IMAGE_URL=$(echo "$REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')

DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
    # If Docker version >= 23, build command was migrated to docker-buildx
    # In Ubuntu, in practice, means to install package docker-buildx together with docker.io
    # Check if docker-buildx plugin is installed
    docker buildx version 1>/dev/null 2>/dev/null
    if [[ $? -ne 0 ]]; then
        echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
        echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
        exit 1;
    fi
    DOCKER_BUILD="docker buildx build"
fi

echo " Building Docker image..."
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
$DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/dlt/mock_blockchain/Dockerfile . > "$BUILD_LOG"

if [ -n "$REGISTRY_IMAGE" ]; then
    echo " Pushing Docker image to '$REGISTRY_IMAGE'..."
@@ -77,12 +93,12 @@ cp ./manifests/"${COMPONENT}".yaml "$MANIFEST"
if [ -n "$REGISTRY_IMAGE" ]; then
    # Registry is set
    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
    sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
else
    # Registry is not set
    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
    sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
fi
@@ -91,8 +107,8 @@ echo " Deploying '$COMPONENT' component to Kubernetes..."
DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
kubectl --namespace $K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"

echo " Collecting env-vars for '$COMPONENT' component..."
SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME} --namespace $K8S_NAMESPACE -o json)
...
@@ -47,6 +47,10 @@ function nats_deploy_single() {
    helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
    echo

    echo "Upgrade NATS Helm Chart"
    helm3 repo update nats
    echo

    echo "Install NATS (single-node)"
    echo ">>> Checking if NATS is deployed..."
    if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
@@ -81,6 +85,10 @@ function nats_deploy_single() {
    echo "NATS Port Mapping"
    echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
    NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
    if [ -z "$NATS_PORT_CLIENT" ]; then
        # NATS charts updated and port name changed from "client" to "nats"; fix to support new name and enable backward compatibility
        NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="nats")].port}')
    fi
    PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
...
@@ -160,10 +160,17 @@ function qdb_undeploy() {
}

function qdb_drop_tables() {
    echo "Drop tables, if exist"

    if [[ -z "${GITLAB_CI}" ]]; then
        #kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o yaml
        QDB_HOST=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.clusterIP}')
        QDB_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
    else
        QDB_HOST="127.0.0.1"
        QDB_PORT=${QDB_EXT_PORT_HTTP}
    fi

    curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;"
    echo
    curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;"
...
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
# By default, only basic components are deployed
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}

# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -148,6 +148,10 @@ printf "\n"
echo "Create secret with NATS data"
NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
if [ -z "$NATS_CLIENT_PORT" ]; then
    # NATS charts updated and port name changed from "client" to "nats"; fix to support new name and enable backward compatibility
    NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="nats")].port}')
fi
kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
    --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \
    --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT}
@@ -183,6 +187,22 @@ REDIS_PASSWORD=`uuidgen`
kubectl create secret generic redis-secrets --namespace=$TFS_K8S_NAMESPACE \
    --from-literal=REDIS_PASSWORD=$REDIS_PASSWORD
echo "export REDIS_PASSWORD=${REDIS_PASSWORD}" >> $ENV_VARS_SCRIPT
printf "\n"

DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
    # If Docker version >= 23, build command was migrated to docker-buildx
    # In Ubuntu, in practice, means to install package docker-buildx together with docker.io
    # Check if docker-buildx plugin is installed
    docker buildx version 1>/dev/null 2>/dev/null
    if [[ $? -ne 0 ]]; then
        echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
        echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
        exit 1;
    fi
    DOCKER_BUILD="docker buildx build"
fi

for COMPONENT in $TFS_COMPONENTS; do
    echo "Processing '$COMPONENT' component..."
@@ -191,25 +211,25 @@ for COMPONENT in $TFS_COMPONENTS; do
        echo " Building Docker image..."
        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"

        if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
            $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
        elif [ "$COMPONENT" == "pathcomp" ]; then
            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
            $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
            $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
            # next command is redundant, but helpful to keep cache updated between rebuilds
            IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
            $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
        elif [ "$COMPONENT" == "dlt" ]; then
            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
            $DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
            $DOCKER_BUILD -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
        else
            $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
        fi

        echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
@@ -279,8 +299,13 @@ for COMPONENT in $TFS_COMPONENTS; do
            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4)
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
        else
            if [ "$TFS_SKIP_BUILD" != "YES" ]; then
                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
                VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
            else
                VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$VERSION" | sed 's,//,/,g' | sed 's,http:/,,g')
            fi
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
        fi
@@ -344,11 +369,17 @@ for COMPONENT in $TFS_COMPONENTS; do
    echo "Waiting for '$COMPONENT' component..."
    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
    kubectl wait --namespace $TFS_K8S_NAMESPACE \
        --for='condition=available' --timeout=90s deployment/${COMPONENT_OBJNAME}service
    WAIT_EXIT_CODE=$?
    if [[ $WAIT_EXIT_CODE != 0 ]]; then
        echo " Failed to deploy '${COMPONENT}' component, exit code '${WAIT_EXIT_CODE}', exiting..."
        kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/${COMPONENT_OBJNAME}service --all-containers=true
        exit $WAIT_EXIT_CODE
    fi
    printf "\n"
done

if [[ "$TFS_COMPONENTS" == *"monitoring"* ]] && [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
    echo "Configuring WebUI DataStores and Dashboards..."
    sleep 5
@@ -554,3 +585,9 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
    printf "\n\n"
fi

if [ "$DOCKER_BUILD" == "docker buildx build" ]; then
    echo "Pruning Docker Buildx Cache..."
    docker buildx prune --force
    printf "\n\n"
fi
@@ -60,22 +60,24 @@ docker exec -it clab-tfs-scenario-client2 bash
$ sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)"

## gNMI Capabilities request
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify capabilities

## gNMI Get request
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /interfaces/interface[name=mgmt0]

## gNMI Set request
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --update-path /system/config/hostname --update-value srl11

(we check the changed value)
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname

## Subscribe request
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf subscribe --path /interfaces/interface[name=mgmt0]/state/

(In another terminal, you can generate traffic)
$ ssh admin@clab-tfs-scenario-srl1
...
@@ -16,7 +16,7 @@
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"

# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device ztp service nbi monitoring webui"

# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
...
src/tests/hackfest3/
\ No newline at end of file
@@ -16,14 +16,30 @@
# If not already set, set the list of components you want to install dependencies for.
# By default, dependencies for all components are installed.
# Components still not supported by this script:
#    ztp & policy  : implemented in Java
#    dlt           : under design
#    pathcomp      : under design
ALL_COMPONENTS="context device service nbi monitoring webui interdomain slice"
ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector"
ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
# Some components require libyang built from source code
# - Ref: https://github.com/CESNET/libyang
# - Ref: https://github.com/CESNET/libyang-python/
echo "Installing libyang..."
sudo apt-get --yes --quiet --quiet update
sudo apt-get --yes --quiet --quiet install build-essential cmake libpcre2-dev python3-dev python3-cffi
mkdir libyang
git clone https://github.com/CESNET/libyang.git libyang
mkdir libyang/build
cd libyang/build
cmake -D CMAKE_BUILD_TYPE:String="Release" ..
make
sudo make install
sudo ldconfig
cd ../..
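# NOTE (illustrative, not part of the original script): after "make install" and
# "ldconfig" above, the libyang shared library should be visible in the loader cache;
# a quick manual check is:
#   ldconfig -p | grep libyang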
echo "Updating PIP, SetupTools and Wheel..." echo "Updating PIP, SetupTools and Wheel..."
pip install --upgrade pip # ensure next packages get the latest versions pip install --upgrade pip # ensure next packages get the latest versions
pip install --upgrade setuptools wheel # bring basic tooling for other requirements pip install --upgrade setuptools wheel # bring basic tooling for other requirements
...@@ -38,7 +54,7 @@ printf "\n" ...@@ -38,7 +54,7 @@ printf "\n"
echo "Collecting requirements from components..." echo "Collecting requirements from components..."
for COMPONENT in $TFS_COMPONENTS for COMPONENT in $TFS_COMPONENTS
do do
if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then continue; fi if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then continue; fi
diff requirements.in src/$COMPONENT/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in diff requirements.in src/$COMPONENT/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
done done
printf "\n" printf "\n"
......
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bgpls-speakerservice
spec:
  selector:
    matchLabels:
      app: bgpls-speakerservice
  replicas: 1
  template:
    metadata:
      labels:
        app: bgpls-speakerservice
    spec:
      terminationGracePeriodSeconds: 5
      containers:
        - name: server
          image: localhost:32000/tfs/bgpls_speaker:dev
          imagePullPolicy: Always
          ports:
            - containerPort: 20030
            - containerPort: 9192
          env:
            - name: LOG_LEVEL
              value: "INFO"
          readinessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:20030"]
          livenessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:20030"]
          resources:
            requests:
              cpu: 250m
              memory: 128Mi
            limits:
              cpu: 1000m
              memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
  name: bgpls-speakerservice
  labels:
    app: bgpls-speakerservice
spec:
  type: ClusterIP
  selector:
    app: bgpls-speakerservice
  ports:
    - name: grpc
      protocol: TCP
      port: 20030
      targetPort: 20030
    - name: metrics
      protocol: TCP
      port: 9192
      targetPort: 9192
@@ -27,28 +27,28 @@ spec:
        app: cachingservice
    spec:
      containers:
        - name: redis
          image: redis:7.0-alpine
          env:
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secrets
                  key: REDIS_PASSWORD
          ports:
            - containerPort: 6379
              name: client
          command: ["redis-server"]
          args:
            - --requirepass
            - $(REDIS_PASSWORD)
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 500m
              memory: 512Mi
---
apiVersion: v1
kind: Service
@@ -59,6 +59,6 @@ spec:
  selector:
    app: cachingservice
  ports:
    - name: redis
      port: 6379
      targetPort: 6379
@@ -30,35 +30,39 @@ spec:
    spec:
      terminationGracePeriodSeconds: 5
      containers:
        - name: server
          image: labs.etsi.org:5050/tfs/controller/context:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 1010
            - containerPort: 9192
          env:
            - name: MB_BACKEND
              value: "nats"
            - name: LOG_LEVEL
              value: "INFO"
            - name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY
              value: "FALSE"
            - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
              value: "FALSE"
          envFrom:
            - secretRef:
                name: crdb-data
            - secretRef:
                name: nats-data
          readinessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:1010"]
          livenessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:1010"]
          resources:
            requests:
              cpu: 250m
              memory: 128Mi
            limits:
              cpu: 1000m
              memory: 1024Mi
---
apiVersion: v1
kind: Service
@@ -71,14 +75,14 @@ spec:
  selector:
    app: contextservice
  ports:
    - name: grpc
      protocol: TCP
      port: 1010
      targetPort: 1010
    - name: metrics
      protocol: TCP
      port: 9192
      targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
@@ -92,12 +96,12 @@ spec:
  minReplicas: 1
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80
  #behavior:
  #  scaleDown:
  #    stabilizationWindowSeconds: 30
@@ -27,28 +27,28 @@ spec:
    spec:
      terminationGracePeriodSeconds: 5
      containers:
        - name: server
          image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 10008
            - containerPort: 9192
          env:
            - name: LOG_LEVEL
              value: "INFO"
          readinessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:10008"]
          livenessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:10008"]
          resources:
            requests:
              cpu: 250m
              memory: 128Mi
            limits:
              cpu: 1000m
              memory: 1024Mi
---
apiVersion: v1
kind: Service
@@ -61,12 +61,12 @@ spec:
  selector:
    app: dbscanservingservice
  ports:
    - name: grpc
      port: 10008
      targetPort: 10008
    - name: metrics
      port: 9192
      targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
@@ -80,12 +80,12 @@ spec:
  minReplicas: 1
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80
  #behavior:
  #  scaleDown:
  #    stabilizationWindowSeconds: 30
@@ -31,33 +31,33 @@ spec:
    spec:
      terminationGracePeriodSeconds: 5
      containers:
        - name: server
          image: labs.etsi.org:5050/tfs/controller/device:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 2020
            - containerPort: 9192
          env:
            - name: LOG_LEVEL
              value: "INFO"
          startupProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:2020"]
            failureThreshold: 30
            periodSeconds: 1
          readinessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:2020"]
          livenessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:2020"]
          resources:
            requests:
              cpu: 250m
              memory: 128Mi
            limits:
              cpu: 1000m
              memory: 1024Mi
---
apiVersion: v1
kind: Service
@@ -70,11 +70,11 @@ spec:
  selector:
    app: deviceservice
  ports:
    - name: grpc
      protocol: TCP
      port: 2020
      targetPort: 2020
    - name: metrics
      protocol: TCP
      port: 9192
      targetPort: 9192
@@ -27,57 +27,57 @@ spec:
    spec:
      terminationGracePeriodSeconds: 5
      containers:
        - name: connector
          image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
            - containerPort: 9192
          env:
            - name: LOG_LEVEL
              value: "INFO"
            ## for debug purposes
            #- name: DLT_GATEWAY_HOST
            #  value: "mock-blockchain.tfs-bchain.svc.cluster.local"
            #- name: DLT_GATEWAY_PORT
            #  value: "50051"
          readinessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:8080"]
          livenessProbe:
            exec:
              command: ["/bin/grpc_health_probe", "-addr=:8080"]
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 500m
              memory: 512Mi
        - name: gateway
          image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 50051
          #readinessProbe:
          #  httpGet:
          #    path: /health
          #    port: 8081
          #  initialDelaySeconds: 5
          #  timeoutSeconds: 5
          #livenessProbe:
          #  httpGet:
          #    path: /health
          #    port: 8081
          #  initialDelaySeconds: 5
          #  timeoutSeconds: 5
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: 700m
              memory: 1024Mi
---
apiVersion: v1
kind: Service
@@ -90,11 +90,11 @@ spec:
  selector:
    app: dltservice
  ports:
    - name: grpc
      protocol: TCP
      port: 8080
      targetPort: 8080
    - name: metrics
      protocol: TCP
      port: 9192
      targetPort: 9192