@@ -24,22 +24,24 @@ stages:
 include:
   #- local: '/manifests/.gitlab-ci.yml'
   - local: '/src/monitoring/.gitlab-ci.yml'
-  - local: '/src/compute/.gitlab-ci.yml'
+  - local: '/src/nbi/.gitlab-ci.yml'
   - local: '/src/context/.gitlab-ci.yml'
   - local: '/src/device/.gitlab-ci.yml'
   - local: '/src/service/.gitlab-ci.yml'
   - local: '/src/dbscanserving/.gitlab-ci.yml'
   - local: '/src/opticalattackmitigator/.gitlab-ci.yml'
   - local: '/src/opticalattackdetector/.gitlab-ci.yml'
-  # - local: '/src/opticalattackmanager/.gitlab-ci.yml'
+  #- local: '/src/opticalattackmanager/.gitlab-ci.yml'
-  - local: '/src/automation/.gitlab-ci.yml'
+  - local: '/src/ztp/.gitlab-ci.yml'
   - local: '/src/policy/.gitlab-ci.yml'
+  - local: '/src/forecaster/.gitlab-ci.yml'
   #- local: '/src/webui/.gitlab-ci.yml'
   #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
   #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
   #- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
-  #- local: '/src/slice/.gitlab-ci.yml'
+  - local: '/src/slice/.gitlab-ci.yml'
   #- local: '/src/interdomain/.gitlab-ci.yml'
   - local: '/src/pathcomp/.gitlab-ci.yml'
   #- local: '/src/dlt/.gitlab-ci.yml'
   - local: '/src/load_generator/.gitlab-ci.yml'
+  - local: '/src/bgpls_speaker/.gitlab-ci.yml'
@@ -40,7 +40,7 @@ components deployed.
 ## Impacted Components
-List of impacted components: Context, Device, Service, PathComp, Slice, Monitoring, Automation, Policy, Compute, etc.
+List of impacted components: Context, Device, Service, PathComp, Slice, Monitoring, ZTP, Policy, NBI, etc.
 Just an enumeration, elaboration of impacts is done below.
 ## Component1 Impact
...
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
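Note: TFS_COMPONENTS is defined with the ${VAR:-default} pattern, so the selection can be overridden without editing the file; a minimal sketch (assuming the variable is exported in the shell before the deploy scripts are run):

    export TFS_COMPONENTS="context device pathcomp service slice nbi webui"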
...
@@ -59,7 +59,7 @@ for COMPONENT in $TFS_COMPONENTS; do
 echo " Building Docker image..."
 BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
-if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
 docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
 elif [ "$COMPONENT" == "pathcomp" ]; then
 BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
...
@@ -38,20 +38,21 @@ GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 # Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/manifests"
 mkdir -p $TMP_MANIFESTS_FOLDER
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+TMP_LOGS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/logs"
 mkdir -p $TMP_LOGS_FOLDER
 echo "Deleting and Creating a new namespace..."
-kubectl delete namespace $K8S_NAMESPACE
+kubectl delete namespace $K8S_NAMESPACE --ignore-not-found
 kubectl create namespace $K8S_NAMESPACE
 printf "\n"
 echo "Deploying components and collecting environment variables..."
 ENV_VARS_SCRIPT=tfs_bchain_runtime_env_vars.sh
-echo "# Environment variables for TeraFlow Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
+echo "# Environment variables for TeraFlowSDN Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
 PYTHONPATH=$(pwd)/src
+echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
 echo "Processing '$COMPONENT' component..."
 IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
@@ -77,12 +78,12 @@ cp ./manifests/"${COMPONENT}".yaml "$MANIFEST"
 if [ -n "$REGISTRY_IMAGE" ]; then
 # Registry is set
-VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
 sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
 sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
 else
 # Registry is not set
-VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
 sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
 sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
 fi
@@ -91,8 +92,8 @@ echo " Deploying '$COMPONENT' component to Kubernetes..."
 DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
 kubectl --namespace $K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
 COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
 echo " Collecting env-vars for '$COMPONENT' component..."
 SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME} --namespace $K8S_NAMESPACE -o json)
...
@@ -47,6 +47,10 @@ function nats_deploy_single() {
 helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
 echo
+echo "Upgrade NATS Helm Chart"
+helm3 repo update nats
+echo
 echo "Install NATS (single-node)"
 echo ">>> Checking if NATS is deployed..."
 if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
@@ -81,6 +85,10 @@ function nats_deploy_single() {
 echo "NATS Port Mapping"
 echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
 NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+if [ -z "$NATS_PORT_CLIENT" ]; then
+# NATS charts updated and port name changed from "client" to "nats"; fix to support new name and enable backward compatibility
+NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="nats")].port}')
+fi
 PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
 kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
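Note: the added fallback handles newer NATS charts that rename the client port from "client" to "nats". A quick way to see which port names the deployed chart actually exposes (a sketch; it reuses the same service/namespace naming as the script above):

    kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o jsonpath='{.spec.ports[*].name}'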
...
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -148,6 +148,10 @@ printf "\n"
 echo "Create secret with NATS data"
 NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+if [ -z "$NATS_CLIENT_PORT" ]; then
+# NATS charts updated and port name changed from "client" to "nats"; fix to support new name and enable backward compatibility
+NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="nats")].port}')
+fi
 kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
 --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \
 --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT}
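Note: once the secret exists, the resolved value can be verified (a sketch; secret data is base64-encoded, hence the decode step):

    kubectl --namespace ${TFS_K8S_NAMESPACE} get secret nats-data -o jsonpath='{.data.NATS_CLIENT_PORT}' | base64 --decode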
@@ -183,6 +187,22 @@ REDIS_PASSWORD=`uuidgen`
 kubectl create secret generic redis-secrets --namespace=$TFS_K8S_NAMESPACE \
 --from-literal=REDIS_PASSWORD=$REDIS_PASSWORD
 echo "export REDIS_PASSWORD=${REDIS_PASSWORD}" >> $ENV_VARS_SCRIPT
+printf "\n"
+DOCKER_BUILD="docker build"
+DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3)
+if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
+# If Docker version >= 23, build command was migrated to docker-buildx
+# In Ubuntu, in practice, means to install package docker-buildx together with docker.io
+# Check if docker-buildx plugin is installed
+docker buildx version 1>/dev/null 2>/dev/null
+if [[ $? -ne 0 ]]; then
+echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx"
+echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx"
+exit 1;
+fi
+DOCKER_BUILD="docker buildx build"
+fi
 for COMPONENT in $TFS_COMPONENTS; do
 echo "Processing '$COMPONENT' component..."
@@ -191,25 +211,25 @@ for COMPONENT in $TFS_COMPONENTS; do
 echo " Building Docker image..."
 BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
-if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
-docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+$DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
 elif [ "$COMPONENT" == "pathcomp" ]; then
 BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
-docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
+$DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
 BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
-docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
+$DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
 # next command is redundant, but helpful to keep cache updated between rebuilds
 IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
-docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+$DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
 elif [ "$COMPONENT" == "dlt" ]; then
 BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
-docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
+$DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
 BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
-docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
+$DOCKER_BUILD -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
 else
-docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+$DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
 fi
 echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
@@ -344,11 +364,11 @@ for COMPONENT in $TFS_COMPONENTS; do
 echo "Waiting for '$COMPONENT' component..."
 COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
 kubectl wait --namespace $TFS_K8S_NAMESPACE \
---for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
+--for='condition=available' --timeout=90s deployment/${COMPONENT_OBJNAME}service
 printf "\n"
 done
-if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
+if [[ "$TFS_COMPONENTS" == *"monitoring"* ]] && [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
 echo "Configuring WebUI DataStores and Dashboards..."
 sleep 5
@@ -554,3 +574,9 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
 printf "\n\n"
 fi
+if [ "$DOCKER_BUILD" == "docker buildx build" ]; then
+echo "Pruning Docker Buildx Cache..."
+docker buildx prune --force
+printf "\n\n"
+fi
@@ -21,7 +21,7 @@
 export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
 # If not already set, set the list of components you want to build images for, and deploy.
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation policy service compute monitoring dbscanserving opticalattackmitigator opticalcentralizedattackdetector l3_attackmitigator l3_centralizedattackdetector webui"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp policy service nbi monitoring dbscanserving opticalattackmitigator opticalcentralizedattackdetector l3_attackmitigator l3_centralizedattackdetector webui"}
 ########################################################################################################################
 # Automated steps start here
...
@@ -60,22 +60,24 @@ docker exec -it clab-tfs-scenario-client2 bash
 $ sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)"
 ## gNMI Capabilities request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify capabilities
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify capabilities
 ## gNMI Get request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/name/host-name
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /interface[name=mgmt0]
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /interfaces/interface[name=mgmt0]
 ## gNMI Set request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --update-path /system/name/host-name --update-value slr11
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf set --update-path /system/config/hostname --update-value srl11
 (we check the changed value)
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/name/host-name
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf get --path /system/config/hostname
 ## Subscribe request
-$ gnmic -a clab-srlinux-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf subscribe --path /interface[name=mgmt0]/statistics
+$ gnmic -a clab-tfs-scenario-srl1 -u admin -p NokiaSrl1! --skip-verify -e json_ietf subscribe --path /interfaces/interface[name=mgmt0]/state/
 (In another terminal, you can generate traffic)
-$ssh admin@clab-srlinux-srl1
+$ssh admin@clab-tfs-scenario-srl1
...
@@ -16,7 +16,7 @@
 export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device automation service compute monitoring webui"
+export TFS_COMPONENTS="context device ztp service nbi monitoring webui"
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
...
src/tests/hackfest3/
\ No newline at end of file
@@ -16,14 +16,30 @@
 # If not already set, set the list of components you want to install dependencies for.
 # By default, dependencies for all components are installed.
 # Components still not supported by this script:
-# automation & policy : implemented in Java
+# ztp & policy : implemented in Java
 # dlt : under design
 # pathcomp : under design
-ALL_COMPONENTS="context device service compute monitoring webui interdomain slice"
+ALL_COMPONENTS="context device service nbi monitoring webui interdomain slice"
 ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector"
 ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
 TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
+# Some components require libyang built from source code
+# - Ref: https://github.com/CESNET/libyang
+# - Ref: https://github.com/CESNET/libyang-python/
+echo "Installing libyang..."
+sudo apt-get --yes --quiet --quiet update
+sudo apt-get --yes --quiet --quiet install build-essential cmake libpcre2-dev python3-dev python3-cffi
+mkdir libyang
+git clone https://github.com/CESNET/libyang.git libyang
+mkdir libyang/build
+cd libyang/build
+cmake -D CMAKE_BUILD_TYPE:String="Release" ..
+make
+sudo make install
+sudo ldconfig
+cd ../..
 echo "Updating PIP, SetupTools and Wheel..."
 pip install --upgrade pip # ensure next packages get the latest versions
 pip install --upgrade setuptools wheel # bring basic tooling for other requirements
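Note: the libyang build above installs the shared library system-wide (sudo make install followed by ldconfig); a quick sanity check (a sketch) before the Python packages that bind to it are installed:

    ldconfig -p | grep libyang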
@@ -38,7 +54,7 @@ printf "\n"
 echo "Collecting requirements from components..."
 for COMPONENT in $TFS_COMPONENTS
 do
-if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then continue; fi
+if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then continue; fi
 diff requirements.in src/$COMPONENT/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
 done
 printf "\n"
...
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bgpls-speakerservice
spec:
  selector:
    matchLabels:
      app: bgpls-speakerservice
  replicas: 1
  template:
    metadata:
      labels:
        app: bgpls-speakerservice
    spec:
      terminationGracePeriodSeconds: 5
      containers:
      - name: server
        image: localhost:32000/tfs/bgpls_speaker:dev
        imagePullPolicy: Always
        ports:
        - containerPort: 20030
        - containerPort: 9192
        env:
        - name: LOG_LEVEL
          value: "INFO"
        readinessProbe:
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:20030"]
        livenessProbe:
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:20030"]
        resources:
          requests:
            cpu: 250m
            memory: 128Mi
          limits:
            cpu: 1000m
            memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
  name: bgpls-speakerservice
  labels:
    app: bgpls-speakerservice
spec:
  type: ClusterIP
  selector:
    app: bgpls-speakerservice
  ports:
  - name: grpc
    protocol: TCP
    port: 20030
    targetPort: 20030
  - name: metrics
    protocol: TCP
    port: 9192
    targetPort: 9192
@@ -41,6 +41,10 @@ spec:
           value: "nats"
         - name: LOG_LEVEL
           value: "INFO"
+        - name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY
+          value: "FALSE"
+        - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
+          value: "FALSE"
         envFrom:
         - secretRef:
             name: crdb-data
...
@@ -44,7 +44,7 @@ spec:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
           failureThreshold: 30
-          periodSeconds: 10
+          periodSeconds: 1
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:2020"]
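Note: with failureThreshold: 30 unchanged, lowering periodSeconds from 10 to 1 shortens the maximum startup-probe window from roughly 300 s (30 × 10 s) to roughly 30 s (30 × 1 s) before the container is considered failed.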
...
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: e2e-orchestratorservice
spec:
  selector:
    matchLabels:
      app: e2e-orchestratorservice
  template:
    metadata:
      labels:
        app: e2e-orchestratorservice
    spec:
      terminationGracePeriodSeconds: 5
      containers:
      - name: server
        image: labs.etsi.org:5050/tfs/controller/e2e_orchestrator:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 10050
        - containerPort: 9192
        env:
        - name: LOG_LEVEL
          value: "INFO"
        readinessProbe:
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:10050"]
        livenessProbe:
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:10050"]
        resources:
          requests:
            cpu: 250m
            memory: 128Mi
          limits:
            cpu: 1000m
            memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
  name: e2e-orchestratorservice
  labels:
    app: e2e-orchestratorservice
spec:
  type: ClusterIP
  selector:
    app: e2e-orchestratorservice
  ports:
  - name: grpc
    port: 10050
    targetPort: 10050
  - name: metrics
    port: 9192
    targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: e2e-orchestratorservice-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: e2e-orchestratorservice
  minReplicas: 1
  maxReplicas: 20
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80
  #behavior:
  #  scaleDown:
  #    stabilizationWindowSeconds: 30
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: forecasterservice
spec:
  selector:
    matchLabels:
      app: forecasterservice
  #replicas: 1
  template:
    metadata:
      labels:
        app: forecasterservice
    spec:
      terminationGracePeriodSeconds: 5
      containers:
      - name: server
        image: labs.etsi.org:5050/tfs/controller/forecaster:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 10040
        - containerPort: 9192
        env:
        - name: LOG_LEVEL
          value: "INFO"
        - name: FORECAST_TO_HISTORY_RATIO
          value: "10"
        startupProbe:
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:10040"]
          failureThreshold: 30
          periodSeconds: 1
        readinessProbe:
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:10040"]
        livenessProbe:
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:10040"]
        resources:
          requests:
            cpu: 250m
            memory: 128Mi
          limits:
            cpu: 1000m
            memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
  name: forecasterservice
  labels:
    app: forecasterservice
spec:
  type: ClusterIP
  selector:
    app: forecasterservice
  ports:
  - name: grpc
    protocol: TCP
    port: 10040
    targetPort: 10040
  - name: metrics
    protocol: TCP
    port: 9192
    targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: forecasterservice-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: forecasterservice
  minReplicas: 1
  maxReplicas: 20
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80
  #behavior:
  #  scaleDown:
  #    stabilizationWindowSeconds: 30
@@ -15,21 +15,21 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: computeservice
+  name: nbiservice
 spec:
   selector:
     matchLabels:
-      app: computeservice
+      app: nbiservice
   replicas: 1
   template:
     metadata:
       labels:
-        app: computeservice
+        app: nbiservice
     spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: labs.etsi.org:5050/tfs/controller/compute:latest
+        image: labs.etsi.org:5050/tfs/controller/nbi:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 8080
@@ -55,13 +55,13 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: computeservice
+  name: nbiservice
   labels:
-    app: computeservice
+    app: nbiservice
 spec:
   type: ClusterIP
   selector:
-    app: computeservice
+    app: nbiservice
   ports:
   - name: http
     protocol: TCP
...
@@ -40,6 +40,20 @@ spec:
        pathType: Prefix
        backend:
          service:
-            name: computeservice
+            name: nbiservice
+            port:
+              number: 8080
+      - path: /()(debug-api/.*)
+        pathType: Prefix
+        backend:
+          service:
+            name: nbiservice
+            port:
+              number: 8080
+      - path: /()(bmw/.*)
+        pathType: Prefix
+        backend:
+          service:
+            name: nbiservice
             port:
               number: 8080
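Note: the two added rules route /debug-api/ and /bmw/ to the same nbiservice backend on port 8080 as the renamed rule above. A quick reachability sketch (assuming the default MicroK8s NGINX ingress answering on the node's HTTP port; <node-ip> is a placeholder):

    curl http://<node-ip>/debug-api/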
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: opticalcontrollerservice
spec:
  selector:
    matchLabels:
      app: opticalcontrollerservice
  replicas: 1
  template:
    metadata:
      labels:
        app: opticalcontrollerservice
    spec:
      terminationGracePeriodSeconds: 5
      containers:
      - name: server
        image: localhost:32000/tfs/opticalcontroller:dev
        imagePullPolicy: Never
        ports:
        - containerPort: 10060
        - containerPort: 9192
        env:
        - name: LOG_LEVEL
          value: "INFO"
        #readinessProbe:
        #  exec:
        #    command: ["/bin/grpc_health_probe", "-addr=:10060"]
        #livenessProbe:
        #  exec:
        #    command: ["/bin/grpc_health_probe", "-addr=:10060"]
        resources:
          requests:
            cpu: 500m
            memory: 128Mi
          limits:
            cpu: 1000m
            memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
  name: opticalcontrollerservice
  labels:
    app: opticalcontrollerservice
spec:
  type: ClusterIP
  selector:
    app: opticalcontrollerservice
  ports:
  - name: grpc
    protocol: TCP
    port: 10060
    targetPort: 10060
  - name: metrics
    protocol: TCP
    port: 9192
    targetPort: 9192