......@@ -17,10 +17,9 @@ stages:
#- dependencies
- build
- unit_test
#- deploy
#- end2end_test
- end2end_test
# include the individual .gitlab-ci.yml of each micro-service
# include the individual .gitlab-ci.yml of each micro-service and tests
include:
#- local: '/manifests/.gitlab-ci.yml'
- local: '/src/monitoring/.gitlab-ci.yml'
......@@ -31,7 +30,8 @@ include:
- local: '/src/dbscanserving/.gitlab-ci.yml'
- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
- local: '/src/opticalattackdetector/.gitlab-ci.yml'
# - local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/opticalcontroller/.gitlab-ci.yml'
- local: '/src/ztp/.gitlab-ci.yml'
- local: '/src/policy/.gitlab-ci.yml'
- local: '/src/forecaster/.gitlab-ci.yml'
......@@ -39,8 +39,12 @@ include:
#- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
#- local: '/src/slice/.gitlab-ci.yml'
- local: '/src/slice/.gitlab-ci.yml'
#- local: '/src/interdomain/.gitlab-ci.yml'
- local: '/src/pathcomp/.gitlab-ci.yml'
#- local: '/src/dlt/.gitlab-ci.yml'
- local: '/src/load_generator/.gitlab-ci.yml'
- local: '/src/bgpls_speaker/.gitlab-ci.yml'
# This should be the last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml'
# ETSI TeraFlowSDN Controller
[ETSI OpenSource Group for TeraFlowSDN](https://tfs.etsi.org/)
The [ETSI Software Development Group TeraFlowSDN (SDG TFS)](https://tfs.etsi.org/) is developing an open source cloud native SDN controller enabling smart connectivity services for future networks beyond 5G.
Former, [Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows
The project originated from "[Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows", a project funded by the European Union’s Horizon 2020 Research and Innovation programme, which finished on 30th June 2023.
Branch "master" : [![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/master/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/master/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master)
Branch "develop" : [![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/develop/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/develop/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop)
## Available branches and releases
# Installation Instructions and Functional Tests
The [TeraFlowSDN Wiki](https://labs.etsi.org/rep/tfs/controller/-/wikis/home) pages include details on using the ETSI TeraFlowSDN release 2.0.
[![Latest Release](https://labs.etsi.org/rep/tfs/controller/-/badges/release.svg)](https://labs.etsi.org/rep/tfs/controller/-/releases)
The documentation, installation instructions, and description of the functional tests defined to enable experimentation with the ETSI TeraFlowSDN Controller can be found in the Wiki pages.
- The branch `master` ([![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/master/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/master/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master)) always points to the latest stable version of the TeraFlowSDN controller.
- The branches `release/X.Y.Z` point to the code of the release version indicated in the branch name (see the checkout example below).
- Code in these branches can be considered stable, and no new features are planned.
- In case of bugs, point releases increasing the revision number (Z) might be created.
- The `develop` ([![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/develop/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/develop/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop)) branch is the main development branch and contains the latest contributions.
- **Use it with care! It might not be stable.**
- The latest developments and contributions are added to this branch for testing and validation before reaching a release.
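To start working from a specific release, the corresponding branch can be checked out (a sketch; the release number below is illustrative):

```bash
# Clone the controller repository and switch to a release branch
git clone https://labs.etsi.org/rep/tfs/controller.git
cd controller
git checkout release/2.1.0   # illustrative version; list available branches with: git branch -r
```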
## Documentation
The [TeraFlowSDN Wiki](https://labs.etsi.org/rep/tfs/controller/-/wikis/home) pages include the main documentation for the ETSI TeraFlowSDN Controller.
The documentation includes project documentation, installation instructions, functional tests, supported NBIs and SBIs, etc.
......@@ -18,6 +18,11 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enable the redeployment of all components
export REDEPLOYALL=${REDEPLOYALL:-""}
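# Example (illustrative; the script name is an assumption, not part of this change):
#   REDEPLOYALL="YES" ./deploy/all.sh   # force redeployment of all components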
# ----- TeraFlowSDN ------------------------------------------------------------
......@@ -102,6 +107,15 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
# If not already set, set the external port the NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
# TESTING
# If not already set, set the NATS installation mode. Accepted values are: 'single' and 'cluster'.
# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single-node mode. It is convenient for
#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
#   with 3 replicas (the default) will be deployed. It is suitable for production environments and
#   provides scalability features.
export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
# If not already set, disable the flag for re-deploying NATS from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
......
......@@ -40,6 +40,21 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
# Automated steps start here
########################################################################################################################
DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
# In Docker version >= 23, the build command was migrated to docker buildx.
# On Ubuntu, in practice, this means installing the docker-buildx package together with docker.io.
# Check whether the docker-buildx plugin is installed:
docker buildx version 1>/dev/null 2>/dev/null
if [[ $? -ne 0 ]]; then
echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
exit 1;
fi
DOCKER_BUILD="docker buildx build"
fi
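# NOTE: on Ubuntu systems where Docker was installed via the docker.io APT package, the buildx
# plugin can typically be added with (assumption; see the link in the message above):
#   sudo apt-get install docker-buildx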
# Constants
GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
TMP_FOLDER="./tmp"
......@@ -60,17 +75,17 @@ for COMPONENT in $TFS_COMPONENTS; do
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
$DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
elif [ "$COMPONENT" == "pathcomp" ]; then
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
# The next command is redundant, but helpful to keep the cache updated between rebuilds
docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
else
docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
fi
if [ -n "$TFS_REGISTRY_IMAGES" ]; then
......
......@@ -18,6 +18,11 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enable the redeployment of all components
export REDEPLOYALL=${REDEPLOYALL:-""}
# If not already set, set the namespace where CockroachDB will be deployed.
export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
......@@ -158,9 +163,19 @@ function crdb_undeploy_single() {
function crdb_drop_database_single() {
echo "Drop database if exists"
CRDB_PORT_SQL=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@cockroachdb-0:${CRDB_PORT_SQL}/defaultdb?sslmode=require"
kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
if [[ -z "${GITLAB_CI}" ]]; then
#kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o yaml
CRDB_HOST=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
CRDB_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
else
CRDB_HOST="127.0.0.1"
CRDB_PORT=${CRDB_EXT_PORT_SQL}
fi
CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@${CRDB_HOST}:${CRDB_PORT}/defaultdb?sslmode=require"
echo "CRDB_CLIENT_URL=${CRDB_CLIENT_URL}"
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
--execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
echo
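# Illustrative value of the resulting client URL (credentials/host/port are examples only):
#   postgresql://tfs:tfs123@10.152.183.10:26257/defaultdb?sslmode=require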
......@@ -213,7 +228,7 @@ function crdb_deploy_cluster() {
kubectl create namespace ${CRDB_NAMESPACE}
echo
echo "CockroachDB"
echo "CockroachDB (cluster-mode)"
echo ">>> Checking if CockroachDB is deployed..."
if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
echo ">>> CockroachDB is present; skipping step."
......@@ -343,14 +358,14 @@ function crdb_undeploy_cluster() {
function crdb_drop_database_cluster() {
echo "Drop database if exists"
kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \
"DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
echo
}
if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
crdb_undeploy_single
fi
......@@ -360,7 +375,7 @@ if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
crdb_drop_database_single
fi
elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
crdb_undeploy_cluster
fi
......
......@@ -58,9 +58,24 @@ echo "Processing '$COMPONENT' component..."
IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
IMAGE_URL=$(echo "$REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
# In Docker version >= 23, the build command was migrated to docker buildx.
# On Ubuntu, in practice, this means installing the docker-buildx package together with docker.io.
# Check whether the docker-buildx plugin is installed:
docker buildx version 1>/dev/null 2>/dev/null
if [[ $? -ne 0 ]]; then
echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
exit 1;
fi
DOCKER_BUILD="docker buildx build"
fi
echo " Building Docker image..."
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
docker build -t "$IMAGE_NAME" -f ./src/dlt/mock_blockchain/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/dlt/mock_blockchain/Dockerfile . > "$BUILD_LOG"
if [ -n "$REGISTRY_IMAGE" ]; then
echo " Pushing Docker image to '$REGISTRY_IMAGE'..."
......
......@@ -18,6 +18,10 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enable the redeployment of all components
export REDEPLOYALL=${REDEPLOYALL:-""}
# If not already set, set the namespace where NATS will be deployed.
export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
......@@ -27,16 +31,32 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
# If not already set, set the external port the NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
# TESTING
# If not already set, set the NATS installation mode. Accepted values are: 'single' and 'cluster'.
# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single-node mode. It is convenient for
#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
#   with 3 replicas (the default) will be deployed. It is suitable for production environments and
#   provides scalability features.
export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
# If not already set, disable the flag for re-deploying NATS from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
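# Example (illustrative invocation; the script path is an assumption): deploy a 3-replica NATS
# cluster from scratch:
#   NATS_DEPLOY_MODE="cluster" NATS_REDEPLOY="YES" ./deploy/nats.sh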
########################################################################################################################
# Automated steps start here
########################################################################################################################
# Constants
TMP_FOLDER="./tmp"
NATS_MANIFESTS_PATH="manifests/nats"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
function nats_deploy_single() {
echo "NATS Namespace"
echo ">>> Create NATS Namespace (if missing)"
......@@ -47,18 +67,85 @@ function nats_deploy_single() {
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
echo
echo "Install NATS (single-node)"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
echo ">>> NATS is present; skipping step."
else
echo ">>> Deploy NATS"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
echo ">>> Waiting NATS statefulset to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
printf "%c" "."
sleep 1
done
# Waiting for statefulset condition "Available=True" does not work.
# Waiting for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws an error:
#   "error: readyReplicas is not found"
# Workaround: check that the pods are ready.
#echo ">>> NATS statefulset created. Waiting for readiness condition..."
#kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats
#kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
# statefulset/nats
echo ">>> NATS statefulset created. Waiting NATS pods to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do
printf "%c" "."
sleep 1
done
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
fi
echo
echo "NATS Port Mapping"
echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}'
CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
echo
echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})"
NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}')
PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}'
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}'
CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
echo
}
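# The port mappings applied above can be verified with (a sketch):
#   kubectl get configmap nginx-ingress-tcp-microk8s-conf --namespace ingress -o yaml
#   kubectl get daemonset nginx-ingress-microk8s-controller --namespace ingress -o yaml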
function nats_deploy_cluster() {
echo "NATS Namespace"
echo ">>> Create NATS Namespace (if missing)"
kubectl create namespace ${NATS_NAMESPACE}
echo
echo "Add NATS Helm Chart"
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
echo
echo "Upgrade NATS Helm Chart"
helm3 repo update nats
echo
echo "Install NATS (single-node)"
echo "Install NATS (cluster-mode)"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
echo ">>> NATS is present; skipping step."
else
echo ">>> Deploy NATS"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
echo ">>> Waiting NATS statefulset to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
printf "%c" "."
......@@ -78,7 +165,17 @@ function nats_deploy_single() {
printf "%c" "."
sleep 1
done
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do
printf "%c" "."
sleep 1
done
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do
printf "%c" "."
sleep 1
done
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2
fi
echo
......@@ -110,7 +207,7 @@ function nats_deploy_single() {
echo
}
function nats_undeploy_single() {
function nats_undeploy() {
echo "NATS"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
......@@ -127,8 +224,14 @@ function nats_undeploy_single() {
echo
}
if [ "$NATS_REDEPLOY" == "YES" ]; then
nats_undeploy_single
if [ "$NATS_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
nats_undeploy
fi
nats_deploy_single
if [ "$NATS_DEPLOY_MODE" == "single" ]; then
nats_deploy_single
elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then
nats_deploy_cluster
else
echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE"
exit 1
fi
\ No newline at end of file
......@@ -18,6 +18,10 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enable the redeployment of all components
export REDEPLOYALL=${REDEPLOYALL:-""}
# If not already set, set the namespace where QuestDB will be deployed.
export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
......@@ -160,17 +164,24 @@ function qdb_undeploy() {
}
function qdb_drop_tables() {
echo "Drop tables, if exist"
if [[ -z "${GITLAB_CI}" ]]; then
#kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o yaml
QDB_HOST=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.clusterIP}')
QDB_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
else
QDB_HOST="127.0.0.1"
QDB_PORT=${QDB_EXT_PORT_HTTP}
fi
curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;"
echo
curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;"
echo
}
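# The same HTTP endpoint can be used to list the existing tables (a sketch, reusing the
# variables set inside qdb_drop_tables above):
#   curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=SHOW+TABLES;"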
if [ "$QDB_REDEPLOY" == "YES" ]; then
if [ "$QDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
qdb_undeploy
fi
......
......@@ -299,8 +299,13 @@ for COMPONENT in $TFS_COMPONENTS; do
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4)
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
else
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
if [ "$TFS_SKIP_BUILD" != "YES" ]; then
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
else
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$VERSION" | sed 's,//,/,g' | sed 's,http:/,,g')
fi
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
fi
......@@ -364,7 +369,13 @@ for COMPONENT in $TFS_COMPONENTS; do
echo "Waiting for '$COMPONENT' component..."
COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
kubectl wait --namespace $TFS_K8S_NAMESPACE \
--for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
--for='condition=available' --timeout=90s deployment/${COMPONENT_OBJNAME}service
WAIT_EXIT_CODE=$?
if [[ $WAIT_EXIT_CODE != 0 ]]; then
echo " Failed to deploy '${COMPONENT}' component, exit code '${WAIT_EXIT_CODE}', exiting..."
kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/${COMPONENT_OBJNAME}service --all-containers=true
exit $WAIT_EXIT_CODE
fi
printf "\n"
done
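# If a component fails to become available within the timeout, its pods can be inspected
# manually (a sketch; <component> is a placeholder):
#   kubectl get pods --namespace $TFS_K8S_NAMESPACE
#   kubectl describe --namespace $TFS_K8S_NAMESPACE deployment/<component>service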
......
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: bgpls-speakerservice
spec:
selector:
matchLabels:
app: bgpls-speakerservice
replicas: 1
template:
metadata:
labels:
app: bgpls-speakerservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: localhost:32000/tfs/bgpls_speaker:dev
imagePullPolicy: Always
ports:
- containerPort: 20030
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:20030"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:20030"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: bgpls-speakerservice
labels:
app: bgpls-speakerservice
spec:
type: ClusterIP
selector:
app: bgpls-speakerservice
ports:
- name: grpc
protocol: TCP
port: 20030
targetPort: 20030
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
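# Once this manifest is applied, the gRPC health endpoint declared above can be probed manually
# (a sketch; deployment name and port are taken from this manifest):
#   kubectl exec -it deployment/bgpls-speakerservice -- /bin/grpc_health_probe -addr=:20030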
......@@ -27,28 +27,28 @@ spec:
app: cachingservice
spec:
containers:
- name: redis
image: redis:7.0-alpine
env:
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secrets
key: REDIS_PASSWORD
ports:
- containerPort: 6379
name: client
command: ["redis-server"]
args:
- --requirepass
- $(REDIS_PASSWORD)
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 500m
memory: 512Mi
---
apiVersion: v1
kind: Service
......@@ -59,6 +59,6 @@ spec:
selector:
app: cachingservice
ports:
- name: redis
port: 6379
targetPort: 6379
......@@ -33,14 +33,16 @@ spec:
resources:
requests:
# This is intentionally low to make it work on local k3d clusters.
cpu: 4
memory: 4Gi
# TESTING
cpu: 1 #4
memory: 500Mi #4Gi
limits:
cpu: 8
memory: 8Gi
# TESTING
cpu: 1 #8
memory: 1Gi #8Gi
tlsEnabled: true
# You can set either a version of the db or a specific image name
# cockroachDBVersion: v22.2.8
image:
name: cockroachdb/cockroach:v22.2.8
# nodes refers to the number of crdb pods that are created
......@@ -49,21 +51,17 @@ spec:
additionalLabels:
crdb: is-cool
# affinity is a new API field that is behind a feature gate that is
# disabled by default. To enable please see the operator.yaml file.
# The affinity field will accept any podSpec affinity rule.
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - cockroachdb
# topologyKey: kubernetes.io/hostname
# TESTING: Force one pod per node, if possible
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/instance: cockroachdb
# nodeSelectors used to match against
# nodeSelector:
......
......@@ -381,6 +381,8 @@ spec:
spec:
containers:
- args:
# TESTING
- -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true
- -zap-log-level
- info
env:
......
......@@ -30,39 +30,39 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/context:latest
imagePullPolicy: Always
ports:
- containerPort: 1010
- containerPort: 9192
env:
- name: MB_BACKEND
value: "nats"
- name: LOG_LEVEL
value: "INFO"
- name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY
value: "FALSE"
- name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
value: "FALSE"
envFrom:
- secretRef:
name: crdb-data
- secretRef:
name: nats-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -75,14 +75,14 @@ spec:
selector:
app: contextservice
ports:
- name: grpc
protocol: TCP
port: 1010
targetPort: 1010
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......@@ -96,12 +96,12 @@ spec:
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
......@@ -27,28 +27,28 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest
imagePullPolicy: Always
ports:
- containerPort: 10008
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10008"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10008"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -61,12 +61,12 @@ spec:
selector:
app: dbscanservingservice
ports:
- name: grpc
port: 10008
targetPort: 10008
- name: metrics
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......@@ -80,12 +80,12 @@ spec:
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
......@@ -31,33 +31,33 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/device:latest
imagePullPolicy: Always
ports:
- containerPort: 2020
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
startupProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
failureThreshold: 30
periodSeconds: 1
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -78,3 +78,22 @@ spec:
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: deviceservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: deviceservice
minReplicas: 1
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
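# Once deployed, the autoscaler status can be checked with (a sketch; the namespace is an assumption):
#   kubectl get hpa deviceservice-hpa --namespace tfs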
......@@ -27,57 +27,57 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: connector
image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
## for debug purposes
#- name: DLT_GATEWAY_HOST
# value: "mock-blockchain.tfs-bchain.svc.cluster.local"
#- name: DLT_GATEWAY_PORT
# value: "50051"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:8080"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:8080"]
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 500m
memory: 512Mi
- name: gateway
image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest
imagePullPolicy: Always
ports:
- containerPort: 50051
#readinessProbe:
# httpGet:
# path: /health
# port: 8081
# initialDelaySeconds: 5
# timeoutSeconds: 5
#livenessProbe:
# httpGet:
# path: /health
# port: 8081
# initialDelaySeconds: 5
# timeoutSeconds: 5
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 700m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -90,11 +90,11 @@ spec:
selector:
app: dltservice
ports:
- name: grpc
protocol: TCP
port: 8080
targetPort: 8080
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
......@@ -27,28 +27,28 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/e2e_orchestrator:latest
imagePullPolicy: Always
ports:
- containerPort: 10050
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10050"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10050"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -61,12 +61,12 @@ spec:
selector:
app: e2e-orchestratorservice
ports:
- name: grpc
port: 10050
targetPort: 10050
- name: metrics
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......@@ -80,12 +80,12 @@ spec:
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
......@@ -28,35 +28,35 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/forecaster:latest
imagePullPolicy: Always
ports:
- containerPort: 10040
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
- name: FORECAST_TO_HISTORY_RATIO
value: "10"
startupProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10040"]
failureThreshold: 30
periodSeconds: 1
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10040"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10040"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -69,14 +69,14 @@ spec:
selector:
app: forecasterservice
ports:
- name: grpc
protocol: TCP
port: 10040
targetPort: 10040
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......@@ -90,12 +90,12 @@ spec:
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
......@@ -27,30 +27,30 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/interdomain:latest
imagePullPolicy: Always
ports:
- containerPort: 10010
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
- name: TOPOLOGY_ABSTRACTOR
value: "DISABLE"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10010"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10010"]
resources:
requests:
cpu: 250m
memory: 64Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -63,11 +63,11 @@ spec:
selector:
app: interdomainservice
ports:
- name: grpc
protocol: TCP
port: 10010
targetPort: 10010
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192