@@ -17,29 +17,34 @@ stages:
#- dependencies
- build
- unit_test
#- deploy
#- end2end_test
- end2end_test
# include the individual .gitlab-ci.yml of each micro-service
# include the individual .gitlab-ci.yml of each micro-service and tests
include:
#- local: '/manifests/.gitlab-ci.yml'
- local: '/src/monitoring/.gitlab-ci.yml'
- local: '/src/compute/.gitlab-ci.yml'
- local: '/src/nbi/.gitlab-ci.yml'
- local: '/src/context/.gitlab-ci.yml'
- local: '/src/device/.gitlab-ci.yml'
- local: '/src/service/.gitlab-ci.yml'
- local: '/src/dbscanserving/.gitlab-ci.yml'
- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
- local: '/src/opticalattackdetector/.gitlab-ci.yml'
# - local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/automation/.gitlab-ci.yml'
- local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/opticalcontroller/.gitlab-ci.yml'
- local: '/src/ztp/.gitlab-ci.yml'
- local: '/src/policy/.gitlab-ci.yml'
- local: '/src/forecaster/.gitlab-ci.yml'
#- local: '/src/webui/.gitlab-ci.yml'
#- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
#- local: '/src/slice/.gitlab-ci.yml'
- local: '/src/slice/.gitlab-ci.yml'
#- local: '/src/interdomain/.gitlab-ci.yml'
- local: '/src/pathcomp/.gitlab-ci.yml'
#- local: '/src/dlt/.gitlab-ci.yml'
- local: '/src/load_generator/.gitlab-ci.yml'
- local: '/src/bgpls_speaker/.gitlab-ci.yml'
# This should be the last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml'
@@ -40,7 +40,7 @@ components deployed.
## Impacted Components
List of impacted components: Context, Device, Service, PathComp, Slice, Monitoring, Automation, Policy, Compute, etc.
List of impacted components: Context, Device, Service, PathComp, Slice, Monitoring, ZTP, Policy, NBI, etc.
This is just an enumeration; the impact on each component is elaborated below.
## Component1 Impact
......
# ETSI TeraFlowSDN Controller
[ETSI OpenSource Group for TeraFlowSDN](https://tfs.etsi.org/)
The [ETSI Software Development Group TeraFlowSDN (SDG TFS)](https://tfs.etsi.org/) is developing an open source cloud native SDN controller enabling smart connectivity services for future networks beyond 5G.
Former, [Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows
The project originated from "[Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows", a project funded by the European Union’s Horizon 2020 Research and Innovation programme that finished on 30th June 2023.
Branch "master" : [![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/master/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/master/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master)
Branch "develop" : [![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/develop/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/develop/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop)
## Available branches and releases
# Installation Instructions and Functional Tests
The [TeraFlowSDN Wiki](https://labs.etsi.org/rep/tfs/controller/-/wikis/home) pages include details on using the ETSI TeraFlowSDN release 2.0.
[![Latest Release](https://labs.etsi.org/rep/tfs/controller/-/badges/release.svg)](https://labs.etsi.org/rep/tfs/controller/-/releases)
The documentation, installation instructions, and description of the functional tests defined to enable experimentation with the ETSI TeraFlowSDN Controller can be found in the Wiki pages
- The `master` branch ([![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/master/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/master/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master)) always points to the latest stable version of the TeraFlowSDN controller.
- The `release/X.Y.Z` branches point to the code of the release version indicated in the branch name.
  - Code in these branches can be considered stable, and no new features are planned for them.
  - If bugs are found, point releases increasing the revision number (Z) might be created.
- The `develop` branch ([![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/develop/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/develop/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop)) is the main development branch and contains the latest contributions.
  - **Use it with care! It might not be stable.**
  - The latest developments and contributions are added to this branch for testing and validation before reaching a release.
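For reference, switching between these branches is a standard git workflow; a minimal sketch, assuming the usual GitLab HTTPS clone URL (the release branch name below is illustrative, following the `release/X.Y.Z` scheme):

```bash
git clone https://labs.etsi.org/rep/tfs/controller.git
cd controller
git checkout develop          # latest contributions; might not be stable
git checkout release/2.0.0    # hypothetical release branch name
```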
## Documentation
The [TeraFlowSDN Wiki](https://labs.etsi.org/rep/tfs/controller/-/wikis/home) pages include the main documentation for the ETSI TeraFlowSDN Controller.
The documentation includes project documentation, installation instructions, functional tests, supported NBIs and SBIs, etc.
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
# By default, only basic components are deployed
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
......
@@ -40,6 +40,21 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
# Automated steps start here
########################################################################################################################
DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
# Docker version 23 and later migrated the build command to docker-buildx
# On Ubuntu, in practice, this means installing the docker-buildx package alongside docker.io
# Check if docker-buildx plugin is installed
docker buildx version 1>/dev/null 2>/dev/null
if [[ $? -ne 0 ]]; then
echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
exit 1;
fi
DOCKER_BUILD="docker buildx build"
fi
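The same detection can be reproduced by hand before launching the script; a small sketch that only assumes the `docker` CLI is on the PATH:

```bash
docker --version   # e.g. "Docker version 24.0.5, build ..."
if docker buildx version >/dev/null 2>&1; then
  echo "buildx plugin present: 'docker buildx build' will be used"
else
  echo "buildx plugin missing: on Ubuntu, install the docker-buildx APT package"
fi
```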
# Constants
GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
TMP_FOLDER="./tmp"
@@ -59,18 +74,18 @@ for COMPONENT in $TFS_COMPONENTS; do
echo " Building Docker image..."
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
$DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
elif [ "$COMPONENT" == "pathcomp" ]; then
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
# next command is redundant, but helpful to keep cache updated between rebuilds
docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
else
docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
fi
if [ -n "$TFS_REGISTRY_IMAGES" ]; then
......
@@ -158,9 +158,19 @@ function crdb_undeploy_single() {
function crdb_drop_database_single() {
echo "Drop database if exists"
CRDB_PORT_SQL=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@cockroachdb-0:${CRDB_PORT_SQL}/defaultdb?sslmode=require"
kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
if [[ -z "${GITLAB_CI}" ]]; then
#kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o yaml
CRDB_HOST=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
CRDB_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
else
CRDB_HOST="127.0.0.1"
CRDB_PORT=${CRDB_EXT_PORT_SQL}
fi
CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@${CRDB_HOST}:${CRDB_PORT}/defaultdb?sslmode=require"
echo "CRDB_CLIENT_URL=${CRDB_CLIENT_URL}"
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
--execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
echo
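Two details of this change are worth noting: under GitLab CI the database is reached through the externally exposed SQL port on 127.0.0.1, and `kubectl exec -it` became `kubectl exec -i` because CI jobs have no TTY to allocate. A manual, out-of-CI invocation might look as follows (a sketch; all values are illustrative and the function is assumed to be sourced from the deploy script):

```bash
export CRDB_NAMESPACE="crdb" CRDB_USERNAME="tfs" CRDB_PASSWORD="tfs123" CRDB_DATABASE="tfs"
crdb_drop_database_single   # prints CRDB_CLIENT_URL, then drops the database if it exists
```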
@@ -343,7 +353,7 @@ function crdb_undeploy_cluster() {
function crdb_drop_database_cluster() {
echo "Drop database if exists"
kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \
"DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
echo
......
@@ -38,28 +38,44 @@ GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
TMP_FOLDER="./tmp"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
TMP_LOGS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/logs"
mkdir -p $TMP_LOGS_FOLDER
echo "Deleting and Creating a new namespace..."
kubectl delete namespace $K8S_NAMESPACE
kubectl delete namespace $K8S_NAMESPACE --ignore-not-found
kubectl create namespace $K8S_NAMESPACE
printf "\n"
echo "Deploying components and collecting environment variables..."
ENV_VARS_SCRIPT=tfs_bchain_runtime_env_vars.sh
echo "# Environment variables for TeraFlow Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
echo "# Environment variables for TeraFlowSDN Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
PYTHONPATH=$(pwd)/src
echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
echo "Processing '$COMPONENT' component..."
IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
IMAGE_URL=$(echo "$REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
# Docker version 23 and later migrated the build command to docker-buildx
# On Ubuntu, in practice, this means installing the docker-buildx package alongside docker.io
# Check if docker-buildx plugin is installed
docker buildx version 1>/dev/null 2>/dev/null
if [[ $? -ne 0 ]]; then
echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
exit 1;
fi
DOCKER_BUILD="docker buildx build"
fi
echo " Building Docker image..."
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
docker build -t "$IMAGE_NAME" -f ./src/dlt/mock_blockchain/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$IMAGE_NAME" -f ./src/dlt/mock_blockchain/Dockerfile . > "$BUILD_LOG"
if [ -n "$REGISTRY_IMAGE" ]; then
echo " Pushing Docker image to '$REGISTRY_IMAGE'..."
@@ -77,12 +93,12 @@ cp ./manifests/"${COMPONENT}".yaml "$MANIFEST"
if [ -n "$REGISTRY_IMAGE" ]; then
# Registry is set
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
else
# Registry is not set
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
fi
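The switch from `cut -d ":" -f3` to `-f4` follows from the registry URL itself containing a colon (the port), which shifts the image tag to the fourth `:`-separated field:

```bash
echo "image: labs.etsi.org:5050/tfs/controller/context:dev" | cut -d ":" -f4
# prints "dev"; with -f3 it would print "5050/tfs/controller/context"
```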
@@ -91,8 +107,8 @@ echo "  Deploying '$COMPONENT' component to Kubernetes..."
DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
kubectl --namespace $K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
echo " Collecting env-vars for '$COMPONENT' component..."
SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME} --namespace $K8S_NAMESPACE -o json)
......
@@ -47,6 +47,10 @@ function nats_deploy_single() {
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
echo
echo "Upgrade NATS Helm Chart"
helm3 repo update nats
echo
echo "Install NATS (single-node)"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
@@ -81,6 +85,10 @@ function nats_deploy_single() {
echo "NATS Port Mapping"
echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
if [ -z "$NATS_PORT_CLIENT" ]; then
# Newer NATS charts renamed the port from "client" to "nats"; fall back to the new name so both chart versions are supported
NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="nats")].port}')
fi
PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
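To see which port name a given chart release actually exposes, the service can be inspected directly; a sketch assuming the NATS namespace and service are both named `nats`, as in these scripts' conventions:

```bash
kubectl --namespace nats get service nats \
  -o jsonpath='{range .spec.ports[*]}{.name}={.port}{"\n"}{end}'
# older charts print client=4222, newer ones nats=4222
```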
......
@@ -160,10 +160,17 @@ function qdb_undeploy() {
}
function qdb_drop_tables() {
QDB_HOST=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.clusterIP}')
QDB_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
echo "Drop tables, if exist"
if [[ -z "${GITLAB_CI}" ]]; then
#kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o yaml
QDB_HOST=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.clusterIP}')
QDB_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
else
QDB_HOST="127.0.0.1"
QDB_PORT=${QDB_EXT_PORT_HTTP}
fi
curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_MONITORING_KPIS}+;"
echo
curl "http://${QDB_HOST}:${QDB_PORT}/exec?fmt=json&query=DROP+TABLE+IF+EXISTS+${QDB_TABLE_SLICE_GROUPS}+;"
......
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
# By default, only basic components are deployed
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -148,6 +148,10 @@ printf "\n"
echo "Create secret with NATS data"
NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
if [ -z "$NATS_CLIENT_PORT" ]; then
# Newer NATS charts renamed the port from "client" to "nats"; fall back to the new name so both chart versions are supported
NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="nats")].port}')
fi
kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \
--from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT}
@@ -183,6 +187,22 @@ REDIS_PASSWORD=`uuidgen`
kubectl create secret generic redis-secrets --namespace=$TFS_K8S_NAMESPACE \
--from-literal=REDIS_PASSWORD=$REDIS_PASSWORD
echo "export REDIS_PASSWORD=${REDIS_PASSWORD}" >> $ENV_VARS_SCRIPT
printf "\n"
DOCKER_BUILD="docker build"
DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
# Docker version 23 and later migrated the build command to docker-buildx
# On Ubuntu, in practice, this means installing the docker-buildx package alongside docker.io
# Check if docker-buildx plugin is installed
docker buildx version 1>/dev/null 2>/dev/null
if [[ $? -ne 0 ]]; then
echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
exit 1;
fi
DOCKER_BUILD="docker buildx build"
fi
for COMPONENT in $TFS_COMPONENTS; do
echo "Processing '$COMPONENT' component..."
@@ -191,25 +211,25 @@ for COMPONENT in $TFS_COMPONENTS; do
echo " Building Docker image..."
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
$DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
elif [ "$COMPONENT" == "pathcomp" ]; then
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
# next command is redundant, but helpful to keep cache updated between rebuilds
IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
$DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
elif [ "$COMPONENT" == "dlt" ]; then
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
else
docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
$DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
fi
echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
@@ -279,8 +299,13 @@ for COMPONENT in $TFS_COMPONENTS; do
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4)
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
else
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
if [ "$TFS_SKIP_BUILD" != "YES" ]; then
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
else
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$VERSION" | sed 's,//,/,g' | sed 's,http:/,,g')
fi
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
fi
@@ -344,11 +369,17 @@ for COMPONENT in $TFS_COMPONENTS; do
echo "Waiting for '$COMPONENT' component..."
COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
kubectl wait --namespace $TFS_K8S_NAMESPACE \
--for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
--for='condition=available' --timeout=90s deployment/${COMPONENT_OBJNAME}service
WAIT_EXIT_CODE=$?
if [[ $WAIT_EXIT_CODE != 0 ]]; then
echo " Failed to deploy '${COMPONENT}' component, exit code '${WAIT_EXIT_CODE}', exiting..."
kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/${COMPONENT_OBJNAME}service --all-containers=true
exit $WAIT_EXIT_CODE
fi
printf "\n"
done
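When the 90-second wait expires, the script now dumps the offending deployment's logs and aborts instead of silently continuing. The same information can be gathered by hand while debugging; a sketch that assumes the TFS namespace is `tfs` and uses `contextservice` as an example deployment:

```bash
kubectl --namespace tfs get pods
kubectl --namespace tfs logs deployment/contextservice --all-containers=true --tail=100
```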
if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
if [[ "$TFS_COMPONENTS" == *"monitoring"* ]] && [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
echo "Configuring WebUI DataStores and Dashboards..."
sleep 5
@@ -554,3 +585,9 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
printf "\n\n"
fi
if [ "$DOCKER_BUILD" == "docker buildx build" ]; then
echo "Pruning Docker Buildx Cache..."
docker buildx prune --force
printf "\n\n"
fi
@@ -60,22 +60,24 @@ docker exec -it clab-tfs-scenario-client2 bash
$ sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)"
## gNMI Capabilities request
$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify capabilities
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify capabilities
## gNMI Get request
$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/name/host-name
$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /interface[name=mgmt0]
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /interfaces/interface[name=mgmt0]
## gNMI Set request
$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --update-path /system/name/host-name --update-value slr11
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --update-path /system/config/hostname --update-value srl11
(we check the changed value)
$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/name/host-name
(we check the changed value)
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname
## Subscribe request
$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf subscribe --path /interface[name=mgmt0]/statistics
$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf subscribe --path /interfaces/interface[name=mgmt0]/state/
(In another terminal, you can generate traffic)
$ ssh admin@clab-srlinux-srl1
$ ssh admin@clab-tfs-scenario-srl1
......
@@ -16,7 +16,7 @@
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device automation service compute monitoring webui"
export TFS_COMPONENTS="context device ztp service nbi monitoring webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
......
src/tests/hackfest3/
\ No newline at end of file
@@ -16,14 +16,30 @@
# If not already set, set the list of components you want to install dependencies for.
# By default, dependencies for all components are installed.
# Components still not supported by this script:
# automation & policy : implemented in Java
# ztp & policy : implemented in Java
# dlt : under design
# pathcomp : under design
ALL_COMPONENTS="context device service compute monitoring webui interdomain slice"
ALL_COMPONENTS="context device service nbi monitoring webui interdomain slice"
ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector"
ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
# Some components require libyang built from source code
# - Ref: https://github.com/CESNET/libyang
# - Ref: https://github.com/CESNET/libyang-python/
echo "Installing libyang..."
sudo apt-get --yes --quiet --quiet update
sudo apt-get --yes --quiet --quiet install build-essential cmake libpcre2-dev python3-dev python3-cffi
mkdir libyang
git clone https://github.com/CESNET/libyang.git libyang
mkdir libyang/build
cd libyang/build
cmake -D CMAKE_BUILD_TYPE:String="Release" ..
make
sudo make install
sudo ldconfig
cd ../..
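An optional sanity check (illustrative, not part of the original script) to confirm the freshly built library was registered with the dynamic linker:

```bash
ldconfig -p | grep libyang   # should list the installed libyang shared objects
```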
echo "Updating PIP, SetupTools and Wheel..."
pip install --upgrade pip # ensure next packages get the latest versions
pip install --upgrade setuptools wheel # bring basic tooling for other requirements
@@ -38,7 +54,7 @@ printf "\n"
echo "Collecting requirements from components..."
for COMPONENT in $TFS_COMPONENTS
do
if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then continue; fi
if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then continue; fi
diff requirements.in src/$COMPONENT/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
done
printf "\n"
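The `diff | grep '^>' | sed` pipeline appends only the lines present in the component file but missing from the global one; a toy run with made-up file contents (component name `demo` is hypothetical):

```bash
printf 'grpcio\nprotobuf\n' > requirements.in
mkdir -p src/demo && printf 'grpcio\nrequests\n' > src/demo/requirements.in
diff requirements.in src/demo/requirements.in | grep '^>' | sed 's/^> //'
# prints "requests", which the loop would append to requirements.in
```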
......
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: bgpls-speakerservice
spec:
selector:
matchLabels:
app: bgpls-speakerservice
replicas: 1
template:
metadata:
labels:
app: bgpls-speakerservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: localhost:32000/tfs/bgpls_speaker:dev
imagePullPolicy: Always
ports:
- containerPort: 20030
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:20030"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:20030"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: bgpls-speakerservice
labels:
app: bgpls-speakerservice
spec:
type: ClusterIP
selector:
app: bgpls-speakerservice
ports:
- name: grpc
protocol: TCP
port: 20030
targetPort: 20030
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
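Once this manifest is applied, the new component can be verified like any other TFS microservice; a sketch assuming the controller namespace is `tfs` (the manifest filename is an assumption derived from the component name):

```bash
kubectl --namespace tfs apply -f manifests/bgpls_speakerservice.yaml
kubectl --namespace tfs get pods -l app=bgpls-speakerservice
kubectl --namespace tfs exec deploy/bgpls-speakerservice -- /bin/grpc_health_probe -addr=:20030
```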
@@ -27,28 +27,28 @@ spec:
app: cachingservice
spec:
containers:
- name: redis
image: redis:7.0-alpine
env:
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secrets
key: REDIS_PASSWORD
ports:
- containerPort: 6379
name: client
command: ["redis-server"]
args:
- --requirepass
- $(REDIS_PASSWORD)
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 500m
memory: 512Mi
- name: redis
image: redis:7.0-alpine
env:
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secrets
key: REDIS_PASSWORD
ports:
- containerPort: 6379
name: client
command: ["redis-server"]
args:
- --requirepass
- $(REDIS_PASSWORD)
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 500m
memory: 512Mi
---
apiVersion: v1
kind: Service
@@ -59,6 +59,6 @@ spec:
selector:
app: cachingservice
ports:
- name: redis
port: 6379
targetPort: 6379
- name: redis
port: 6379
targetPort: 6379
@@ -30,35 +30,39 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/context:latest
imagePullPolicy: Always
ports:
- containerPort: 1010
- containerPort: 9192
env:
- name: MB_BACKEND
value: "nats"
- name: LOG_LEVEL
value: "INFO"
envFrom:
- secretRef:
name: crdb-data
- secretRef:
name: nats-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
- name: server
image: labs.etsi.org:5050/tfs/controller/context:latest
imagePullPolicy: Always
ports:
- containerPort: 1010
- containerPort: 9192
env:
- name: MB_BACKEND
value: "nats"
- name: LOG_LEVEL
value: "INFO"
- name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY
value: "FALSE"
- name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
value: "FALSE"
envFrom:
- secretRef:
name: crdb-data
- secretRef:
name: nats-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:1010"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
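Judging by their names, the two new flags control whether devices and links may be added to a topology explicitly through the Context API; both are disabled here. If they need to be flipped on a running deployment, `kubectl set env` avoids editing the manifest (namespace `tfs` is an assumption):

```bash
kubectl --namespace tfs set env deployment/contextservice \
  ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY=TRUE \
  ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY=TRUE
```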
---
apiVersion: v1
kind: Service
@@ -71,14 +75,14 @@ spec:
selector:
app: contextservice
ports:
- name: grpc
protocol: TCP
port: 1010
targetPort: 1010
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
- name: grpc
protocol: TCP
port: 1010
targetPort: 1010
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
@@ -92,12 +96,12 @@ spec:
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
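With the HorizontalPodAutoscaler in place (CPU utilization target 80%, 1 to 20 replicas), scaling decisions can be observed live; a sketch assuming the `tfs` namespace:

```bash
kubectl --namespace tfs get hpa -w   # watch replica counts react to CPU load
```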
@@ -27,28 +27,28 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest
imagePullPolicy: Always
ports:
- containerPort: 10008
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10008"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10008"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
- name: server
image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest
imagePullPolicy: Always
ports:
- containerPort: 10008
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10008"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10008"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
@@ -61,12 +61,12 @@ spec:
selector:
app: dbscanservingservice
ports:
- name: grpc
port: 10008
targetPort: 10008
- name: metrics
port: 9192
targetPort: 9192
- name: grpc
port: 10008
targetPort: 10008
- name: metrics
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
@@ -80,12 +80,12 @@ spec:
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
......@@ -31,33 +31,33 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/device:latest
imagePullPolicy: Always
ports:
- containerPort: 2020
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
startupProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
failureThreshold: 30
periodSeconds: 10
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
- name: server
image: labs.etsi.org:5050/tfs/controller/device:latest
imagePullPolicy: Always
ports:
- containerPort: 2020
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
startupProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
failureThreshold: 30
periodSeconds: 1
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
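The startup-probe change above is easy to quantify; the allowed startup window is `failureThreshold × periodSeconds`:

```bash
# before: 30 × 10 s = 300 s of grace before the pod is restarted
# after : 30 ×  1 s =  30 s, with a health check every second
```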
---
apiVersion: v1
kind: Service
@@ -70,11 +70,11 @@ spec:
selector:
app: deviceservice
ports:
- name: grpc
protocol: TCP
port: 2020
targetPort: 2020
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
- name: grpc
protocol: TCP
port: 2020
targetPort: 2020
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
@@ -27,57 +27,57 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: connector
image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
## for debug purposes
#- name: DLT_GATEWAY_HOST
# value: "mock-blockchain.tfs-bchain.svc.cluster.local"
#- name: DLT_GATEWAY_PORT
# value: "50051"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:8080"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:8080"]
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 500m
memory: 512Mi
- name: gateway
image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest
imagePullPolicy: Always
ports:
- containerPort: 50051
#readinessProbe:
# httpGet:
# path: /health
# port: 8081
# initialDelaySeconds: 5
# timeoutSeconds: 5
#livenessProbe:
# httpGet:
# path: /health
# port: 8081
# initialDelaySeconds: 5
# timeoutSeconds: 5
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 700m
memory: 1024Mi
- name: connector
image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
## for debug purposes
#- name: DLT_GATEWAY_HOST
# value: "mock-blockchain.tfs-bchain.svc.cluster.local"
#- name: DLT_GATEWAY_PORT
# value: "50051"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:8080"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:8080"]
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 500m
memory: 512Mi
- name: gateway
image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest
imagePullPolicy: Always
ports:
- containerPort: 50051
#readinessProbe:
# httpGet:
# path: /health
# port: 8081
# initialDelaySeconds: 5
# timeoutSeconds: 5
#livenessProbe:
# httpGet:
# path: /health
# port: 8081
# initialDelaySeconds: 5
# timeoutSeconds: 5
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 700m
memory: 1024Mi
---
apiVersion: v1
kind: Service
@@ -90,11 +90,11 @@ spec:
selector:
app: dltservice
ports:
- name: grpc
protocol: TCP
port: 8080
targetPort: 8080
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
- name: grpc
protocol: TCP
port: 8080
targetPort: 8080
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
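The connector container exposes the standard gRPC health service on port 8080, so even with the gateway probes commented out, liveness can be checked by hand; a sketch assuming the `tfs` namespace:

```bash
kubectl --namespace tfs exec deploy/dltservice -c connector -- /bin/grpc_health_probe -addr=:8080
```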