From 826e1bac8738fec7b5369b6bffffe648751fc57d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Ara=C3=BAjo?= Date: Mon, 11 Dec 2023 17:03:55 +0000 Subject: [PATCH 01/11] HPA in services HPA in webui service --- manifests/deviceservice.yaml | 35 ++++++++++++++++++++++++-------- manifests/monitoringservice.yaml | 35 ++++++++++++++++++++++++-------- manifests/webuiservice.yaml | 35 ++++++++++++++++++++++++++------ 3 files changed, 83 insertions(+), 22 deletions(-) diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index fdc3cea02..6181fc63a 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -70,11 +70,30 @@ spec: selector: app: deviceservice ports: - - name: grpc - protocol: TCP - port: 2020 - targetPort: 2020 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 2020 + targetPort: 2020 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: deviceservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: deviceservice + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml index 3a4d43cd9..6700afdd6 100644 --- a/manifests/monitoringservice.yaml +++ b/manifests/monitoringservice.yaml @@ -65,11 +65,30 @@ spec: selector: app: monitoringservice ports: - - name: grpc - protocol: TCP - port: 7070 - targetPort: 7070 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 7070 + targetPort: 7070 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: monitoringservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: monitoringservice + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index bb2573c45..87fe57719 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -111,9 +111,32 @@ spec: selector: app: webuiservice ports: - - name: webui - port: 8004 - targetPort: 8004 - - name: grafana - port: 3000 - targetPort: 3000 + - name: webui + port: 8004 + targetPort: 8004 + - name: grafana + port: 3000 + targetPort: 3000 +# TESTING +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: webuiservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: webuiservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 -- GitLab From 6763315984de86baf9d4979609650fb443d875a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Ara=C3=BAjo?= Date: Wed, 22 Nov 2023 15:54:26 +0000 Subject: [PATCH 02/11] Added rate limiting to ingress controller --- manifests/nginx_ingress_http.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index 2b63e5c1c..f150e4c86 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -18,6 +18,11 @@ metadata: name: tfs-ingress annotations: 
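+    # (Semantics of the ingress-nginx annotations added below, summarized here:
+    # limit-rps caps the requests per second accepted from one client IP, with a
+    # burst allowance, 5x the limit by default, before NGINX answers 503;
+    # limit-connections caps concurrent connections per client IP; the
+    # proxy-*-timeout values are the seconds NGINX waits to connect to, send to,
+    # or read from the backend service before failing the request.)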
nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/limit-rps: '2'
+    nginx.ingress.kubernetes.io/limit-connections: '5'
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: '10'
+    nginx.ingress.kubernetes.io/proxy-send-timeout: '10'
+    nginx.ingress.kubernetes.io/proxy-read-timeout: '10'
 spec:
   rules:
   - http:
--
GitLab

From f1a63185fbd81f1d92ccf3a7a2d4d76b540a46fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?David=20Ara=C3=BAjo?=
Date: Fri, 2 Feb 2024 13:52:55 +0000
Subject: [PATCH 03/11] NGINX and redeployall default variables set

code refactoring

NATS cluster complete

Startup Probe failing in NATS cluster mode

Cockroach cluster operator and NATS cluster mode

Update

Update scheduling policy for CRDB

NATS cluster mode

Testing CRDB cluster with node affinity

Revert "Testing dynamic node resources"

This reverts commit 856eb4799d2136697c721b387e6fca9fdcdbf5fd.

Testing dynamic node resources

NGINX and redeployall

Update my_deploy.sh

Update nginx_ingress_http.yaml

Redeploy all fixed

Add redeploy all feature
---
 deploy/all.sh                       |  14 ++++
 deploy/crdb.sh                      |  11 ++-
 deploy/nats.sh                      | 119 ++++++++++++++++++++++++++--
 deploy/qdb.sh                       |   6 +-
 manifests/cockroachdb/cluster.yaml  |  36 ++++-----
 manifests/cockroachdb/operator.yaml |   2 +
 manifests/nats/cluster.yaml         |  34 ++++++++
 manifests/nginx_ingress_http.yaml   |   4 +-
 my_deploy.sh                        |  12 ++-
 9 files changed, 204 insertions(+), 34 deletions(-)
 create mode 100644 manifests/nats/cluster.yaml

diff --git a/deploy/all.sh b/deploy/all.sh
index 25d69b485..50a6c0816 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -18,6 +18,11 @@
 # Read deployment settings
 ########################################################################################################################

+# ----- Redeploy All ------------------------------------------------------------
+
+# If not already set, enables all components redeployment
+export REDEPLOYALL=${REDEPLOYALL:-""}
+

 # ----- TeraFlowSDN ------------------------------------------------------------

@@ -102,6 +107,15 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

+# TESTING
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
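A minimal usage sketch for the variables introduced above, assuming the usual
my_deploy.sh / deploy/all.sh entry points from this repository (illustrative,
not part of the patch itself):

    export REDEPLOYALL="YES"          # every component script treats this like its own *_REDEPLOY flag
    export NATS_DEPLOY_MODE="cluster" # 3-replica NATS instead of a single node
    ./deploy/all.sh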
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index a304e83d1..2a8bd88d3 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -18,6 +18,11 @@
 # Read deployment settings
 ########################################################################################################################

+# ----- Redeploy All ------------------------------------------------------------
+# If not already set, enables all components redeployment
+export REDEPLOYALL=${REDEPLOYALL:-""}
+
+
 # If not already set, set the namespace where CockroachDB will be deployed.
 export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}

@@ -223,7 +228,7 @@ function crdb_deploy_cluster() {
     kubectl create namespace ${CRDB_NAMESPACE}
     echo

-    echo "CockroachDB"
+    echo "CockroachDB (cluster-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> CockroachDB is present; skipping step."
@@ -360,7 +365,7 @@ function crdb_drop_database_cluster() {
 }

 if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
-    if [ "$CRDB_REDEPLOY" == "YES" ]; then
+    if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
         crdb_undeploy_single
     fi

@@ -370,7 +375,7 @@ if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
         crdb_drop_database_single
     fi
 elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
-    if [ "$CRDB_REDEPLOY" == "YES" ]; then
+    if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
         crdb_undeploy_cluster
     fi

diff --git a/deploy/nats.sh b/deploy/nats.sh
index 004f67c44..d6922d86b 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -18,6 +18,10 @@
 # Read deployment settings
 ########################################################################################################################

+# ----- Redeploy All ------------------------------------------------------------
+# If not already set, enables all components redeployment
+export REDEPLOYALL=${REDEPLOYALL:-""}
+
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}

@@ -27,16 +31,32 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

+# TESTING
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
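 # (Shell note: the ${VAR:-""} expansions used throughout these deploy scripts
 # keep any value already exported by the caller, e.g. from my_deploy.sh, and
 # otherwise default to the empty string, i.e. the flag stays disabled unless
 # explicitly set to "YES".)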
export NATS_REDEPLOY=${NATS_REDEPLOY:-""} - ######################################################################################################################## # Automated steps start here ######################################################################################################################## +# Constants +TMP_FOLDER="./tmp" +NATS_MANIFESTS_PATH="manifests/nats" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests" +mkdir -p $TMP_MANIFESTS_FOLDER + function nats_deploy_single() { echo "NATS Namespace" echo ">>> Create NATS Namespace (if missing)" @@ -47,18 +67,85 @@ function nats_deploy_single() { helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/ echo + echo "Install NATS (single-node)" + echo ">>> Checking if NATS is deployed..." + if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then + echo ">>> NATS is present; skipping step." + else + echo ">>> Deploy NATS" + helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine + + echo ">>> Waiting NATS statefulset to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do + printf "%c" "." + sleep 1 + done + + # Wait for statefulset condition "Available=True" does not work + # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error: + # "error: readyReplicas is not found" + # Workaround: Check the pods are ready + #echo ">>> NATS statefulset created. Waiting for readiness condition..." + #kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats + #kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \ + # statefulset/nats + echo ">>> NATS statefulset created. Waiting NATS pods to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do + printf "%c" "." 
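+        # (Existence of the pod object is polled once per second here; actual
+        # readiness is then awaited via the kubectl wait below, up to 300s.)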
+ sleep 1 + done + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0 + fi + echo + + echo "NATS Port Mapping" + echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})" + NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') + PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})" + NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}') + PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo +} + + +function nats_deploy_cluster() { + echo "NATS Namespace" + echo ">>> Create NATS Namespace (if missing)" + kubectl create namespace ${NATS_NAMESPACE} + echo + + echo "Add NATS Helm Chart" + helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/ + echo + echo "Upgrade NATS Helm Chart" helm3 repo update nats echo - echo "Install NATS (single-node)" + echo "Install NATS (cluster-mode)" echo ">>> Checking if NATS is deployed..." if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then echo ">>> NATS is present; skipping step." else echo ">>> Deploy NATS" - helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine - + cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml" + helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml" + echo ">>> Waiting NATS statefulset to be created..." while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do printf "%c" "." @@ -78,7 +165,17 @@ function nats_deploy_single() { printf "%c" "." sleep 1 done + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do + printf "%c" "." + sleep 1 + done + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do + printf "%c" "." 
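+        # (With the StatefulSet's default OrderedReady policy, nats-0, nats-1
+        # and nats-2 are created sequentially, hence one existence poll per
+        # replica before the Ready waits that follow.)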
+ sleep 1 + done kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0 + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1 + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2 fi echo @@ -110,7 +207,7 @@ function nats_deploy_single() { echo } -function nats_undeploy_single() { +function nats_undeploy() { echo "NATS" echo ">>> Checking if NATS is deployed..." if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then @@ -127,8 +224,14 @@ function nats_undeploy_single() { echo } -if [ "$NATS_REDEPLOY" == "YES" ]; then - nats_undeploy_single +if [ "$NATS_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then + nats_undeploy fi -nats_deploy_single +if [ "$NATS_DEPLOY_MODE" == "single" ]; then + nats_deploy_single +elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then + nats_deploy_cluster +else + echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE" +fi \ No newline at end of file diff --git a/deploy/qdb.sh b/deploy/qdb.sh index 3235c6c82..52d2fc7db 100755 --- a/deploy/qdb.sh +++ b/deploy/qdb.sh @@ -18,6 +18,10 @@ # Read deployment settings ######################################################################################################################## +# ----- Redeploy All ------------------------------------------------------------ +# If not already set, enables all components redeployment +export REDEPLOYALL=${REDEPLOYALL:-""} + # If not already set, set the namespace where QuestDB will be deployed. export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"} @@ -177,7 +181,7 @@ function qdb_drop_tables() { echo } -if [ "$QDB_REDEPLOY" == "YES" ]; then +if [ "$QDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then qdb_undeploy fi diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml index 4d9ef0f84..73875ca3f 100644 --- a/manifests/cockroachdb/cluster.yaml +++ b/manifests/cockroachdb/cluster.yaml @@ -33,14 +33,16 @@ spec: resources: requests: # This is intentionally low to make it work on local k3d clusters. - cpu: 4 - memory: 4Gi + # TESTING + cpu: 1 #4 + memory: 500Mi #4Gi limits: - cpu: 8 - memory: 8Gi + # TESTING + cpu: 1 #8 + memory: 1Gi #8Gi tlsEnabled: true -# You can set either a version of the db or a specific image name -# cockroachDBVersion: v22.2.8 + # You can set either a version of the db or a specific image name + # cockroachDBVersion: v22.2.8 image: name: cockroachdb/cockroach:v22.2.8 # nodes refers to the number of crdb pods that are created @@ -49,21 +51,17 @@ spec: additionalLabels: crdb: is-cool # affinity is a new API field that is behind a feature gate that is - # disabled by default. To enable please see the operator.yaml file. + # disabled by default. To enable please see the operator.yaml file. # The affinity field will accept any podSpec affinity rule. 
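+  # (The topologySpreadConstraints added below replace the commented-out
+  # podAntiAffinity example that follows: maxSkew 1 over the hostname topology
+  # spreads CRDB pods across nodes, and whenUnsatisfiable: ScheduleAnyway keeps
+  # this a soft preference, so pods still schedule on clusters with few nodes.)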
- # affinity: - # podAntiAffinity: - # preferredDuringSchedulingIgnoredDuringExecution: - # - weight: 100 - # podAffinityTerm: - # labelSelector: - # matchExpressions: - # - key: app.kubernetes.io/instance - # operator: In - # values: - # - cockroachdb - # topologyKey: kubernetes.io/hostname + # TESTING: Force one pod per node, if possible + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/instance: cockroachdb # nodeSelectors used to match against # nodeSelector: diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml index 59d515061..0d578410c 100644 --- a/manifests/cockroachdb/operator.yaml +++ b/manifests/cockroachdb/operator.yaml @@ -381,6 +381,8 @@ spec: spec: containers: - args: + # TESTING + - -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true - -zap-log-level - info env: diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml new file mode 100644 index 000000000..39e41958f --- /dev/null +++ b/manifests/nats/cluster.yaml @@ -0,0 +1,34 @@ +container: + image: + tags: 2.9-alpine + env: + # different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB + # should be ~90% of memory limit + GOMEMLIMIT: 400MiB + merge: + # recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters + resources: + requests: + cpu: 1 # 2 + memory: 500Mi # 4Gi + limits: + cpu: 1 # 4 + memory: 1Gi # 8Gi + +config: + cluster: + enabled: true + replicas: 3 + jetstream: + enabled: true + fileStore: + pvc: + size: 4Gi + +# Force one pod per node, if possible +podTemplate: + topologySpreadConstraints: + kubernetes.io/hostname: + maxSkew: 1 + whenUnsatisfiable: ScheduleAnyway + \ No newline at end of file diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index f150e4c86..e91f62242 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -18,8 +18,8 @@ metadata: name: tfs-ingress annotations: nginx.ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/limit-rps: '2' - nginx.ingress.kubernetes.io/limit-connections: '5' + nginx.ingress.kubernetes.io/limit-rps: '5' + nginx.ingress.kubernetes.io/limit-connections: '10' nginx.ingress.kubernetes.io/proxy-connect-timeout: '10' nginx.ingress.kubernetes.io/proxy-send-timeout: '10' nginx.ingress.kubernetes.io/proxy-read-timeout: '10' diff --git a/my_deploy.sh b/my_deploy.sh index 7dd5e5c3e..92a1bfb63 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -13,6 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +# ----- Redeploy All ------------------------------------------------------------ + +# If not already set, enables all components redeployment +export REDEPLOYALL="" + # ----- TeraFlowSDN ------------------------------------------------------------ @@ -96,7 +101,7 @@ export CRDB_DATABASE="tfs" # Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. # See ./deploy/all.sh or ./deploy/crdb.sh for additional details -export CRDB_DEPLOY_MODE="single" +export CRDB_DEPLOY_MODE="cluster" # Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" @@ -116,6 +121,11 @@ export NATS_EXT_PORT_CLIENT="4222" # Set the external port NATS HTTP Mgmt GUI interface will be exposed to. 
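# (Port 8222 is NATS' built-in HTTP monitoring interface; once exposed, its
# standard endpoints can be used for a quick health check, for example:
#   curl -s http://127.0.0.1:8222/healthz
#   curl -s http://127.0.0.1:8222/varz)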
export NATS_EXT_PORT_HTTP="8222"

+# TESTING
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
 # Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
--
GitLab

From cb8eb2dc4e153dd4b0a7f3ef46cda7f8f854f64a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?David=20Ara=C3=BAjo?=
Date: Wed, 31 Jan 2024 16:29:09 +0000
Subject: [PATCH 04/11] CRDB and NATS cluster mode

Restore default values
---
 deploy/all.sh                       |  1 -
 deploy/nats.sh                      |  4 ++--
 manifests/cockroachdb/cluster.yaml  | 11 ++++-------
 manifests/cockroachdb/operator.yaml |  1 -
 manifests/nats/cluster.yaml         |  8 ++++----
 manifests/webuiservice.yaml         |  1 -
 my_deploy.sh                        |  3 +--
 7 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/deploy/all.sh b/deploy/all.sh
index 50a6c0816..63d202960 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -107,7 +107,6 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

-# TESTING
 # If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
 # - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
diff --git a/deploy/nats.sh b/deploy/nats.sh
index d6922d86b..02e22965e 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -31,7 +31,6 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

-# TESTING
 # If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
 # - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
@@ -73,7 +72,8 @@ function nats_deploy_single() {
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true
+

         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml
index 73875ca3f..bcb0c7049 100644
--- a/manifests/cockroachdb/cluster.yaml
+++ b/manifests/cockroachdb/cluster.yaml
@@ -33,13 +33,11 @@ spec:
   resources:
     requests:
       # This is intentionally low to make it work on local k3d clusters.
-      # TESTING
-      cpu: 1 #4
-      memory: 500Mi #4Gi
+      cpu: 4
+      memory: 4Gi
     limits:
-      # TESTING
-      cpu: 1 #8
-      memory: 1Gi #8Gi
+      cpu: 8
+      memory: 8Gi
   tlsEnabled: true
   # You can set either a version of the db or a specific image name
   # cockroachDBVersion: v22.2.8
@@ -54,7 +52,6 @@ spec:
     # disabled by default. To enable please see the operator.yaml file.
     # The affinity field will accept any podSpec affinity rule.
- # TESTING: Force one pod per node, if possible topologySpreadConstraints: - maxSkew: 1 topologyKey: kubernetes.io/hostname diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml index 0d578410c..d8e691308 100644 --- a/manifests/cockroachdb/operator.yaml +++ b/manifests/cockroachdb/operator.yaml @@ -381,7 +381,6 @@ spec: spec: containers: - args: - # TESTING - -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true - -zap-log-level - info diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml index 39e41958f..491c86628 100644 --- a/manifests/nats/cluster.yaml +++ b/manifests/nats/cluster.yaml @@ -9,11 +9,11 @@ container: # recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters resources: requests: - cpu: 1 # 2 - memory: 500Mi # 4Gi + cpu: 1 + memory: 500Mi limits: - cpu: 1 # 4 - memory: 1Gi # 8Gi + cpu: 1 + memory: 1Gi config: cluster: diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index 87fe57719..d7e7c9777 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -117,7 +117,6 @@ spec: - name: grafana port: 3000 targetPort: 3000 -# TESTING --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler diff --git a/my_deploy.sh b/my_deploy.sh index 92a1bfb63..bc8ff56a9 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -101,7 +101,7 @@ export CRDB_DATABASE="tfs" # Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. # See ./deploy/all.sh or ./deploy/crdb.sh for additional details -export CRDB_DEPLOY_MODE="cluster" +export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" @@ -121,7 +121,6 @@ export NATS_EXT_PORT_CLIENT="4222" # Set the external port NATS HTTP Mgmt GUI interface will be exposed to. export NATS_EXT_PORT_HTTP="8222" -# TESTING # Set NATS installation mode to 'single'. This option is convenient for development and testing. 
# See ./deploy/all.sh or ./deploy/nats.sh for additional details export NATS_DEPLOY_MODE="single" -- GitLab From 8b3b07612812f571d554302d315257bee79760dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Ara=C3=BAjo?= Date: Wed, 24 Apr 2024 15:46:39 +0100 Subject: [PATCH 05/11] Container lab fixed --- hackfest/containerlab/srl1.cli | 14 ++++++++++++++ hackfest/containerlab/srl2.cli | 14 ++++++++++++++ hackfest/containerlab/tfs-scenario.clab.yml | 12 +++++++++--- 3 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 hackfest/containerlab/srl1.cli create mode 100644 hackfest/containerlab/srl2.cli diff --git a/hackfest/containerlab/srl1.cli b/hackfest/containerlab/srl1.cli new file mode 100644 index 000000000..fd7144ca7 --- /dev/null +++ b/hackfest/containerlab/srl1.cli @@ -0,0 +1,14 @@ +set / interface ethernet-1/2 admin-state enable +set / interface ethernet-1/2 subinterface 0 admin-state enable +set / interface ethernet-1/2 subinterface 0 ipv4 address 172.16.1.1/24 + +set / interface ethernet-1/1 admin-state enable +set / interface ethernet-1/1 subinterface 0 admin-state enable +set / interface ethernet-1/1 subinterface 0 ipv4 address 172.0.0.1/30 + +set / network-instance default +set / network-instance default interface ethernet-1/1.0 +set / network-instance default interface ethernet-1/2.0 + +set / network-instance default next-hop-groups group group1 nexthop 1 ip-address 172.0.0.2 admin-state enable +set / network-instance default static-routes route 172.16.2.0/24 next-hop-group group1 admin-state enable \ No newline at end of file diff --git a/hackfest/containerlab/srl2.cli b/hackfest/containerlab/srl2.cli new file mode 100644 index 000000000..395d53c71 --- /dev/null +++ b/hackfest/containerlab/srl2.cli @@ -0,0 +1,14 @@ +set / interface ethernet-1/2 admin-state enable +set / interface ethernet-1/2 subinterface 0 admin-state enable +set / interface ethernet-1/2 subinterface 0 ipv4 address 172.16.2.1/24 + +set / interface ethernet-1/1 admin-state enable +set / interface ethernet-1/1 subinterface 0 admin-state enable +set / interface ethernet-1/1 subinterface 0 ipv4 address 172.0.0.2/30 + +set / network-instance default +set / network-instance default interface ethernet-1/1.0 +set / network-instance default interface ethernet-1/2.0 + +set /network-instance default next-hop-groups group group1 nexthop 1 ip-address 172.0.0.1 admin-state enable +set /network-instance default static-routes route 172.16.1.0/24 next-hop-group group1 admin-state enable \ No newline at end of file diff --git a/hackfest/containerlab/tfs-scenario.clab.yml b/hackfest/containerlab/tfs-scenario.clab.yml index df197ebea..c26d46c26 100644 --- a/hackfest/containerlab/tfs-scenario.clab.yml +++ b/hackfest/containerlab/tfs-scenario.clab.yml @@ -24,7 +24,7 @@ mgmt: topology: kinds: srl: - image: ghcr.io/nokia/srlinux:23.3.1 + image: ghcr.io/nokia/srlinux:21.11.3 linux: image: ghcr.io/hellt/network-multitool nodes: @@ -34,24 +34,30 @@ topology: cpu: 0.5 memory: 1GB mgmt-ipv4: 172.100.100.101 - #startup-config: srl1.cli + startup-config: ./srl1.cli srl2: kind: srl type: ixr6 cpu: 0.5 memory: 1GB mgmt-ipv4: 172.100.100.102 - #startup-config: srl2.cli + startup-config: ./srl2.cli client1: kind: linux cpu: 0.1 memory: 100MB mgmt-ipv4: 172.100.100.201 + exec: + - ip address add 172.16.1.10/24 dev eth1 + - ip route add 172.16.2.0/24 via 172.16.1.1 client2: kind: linux cpu: 0.1 memory: 100MB mgmt-ipv4: 172.100.100.202 + exec: + - ip address add 172.16.2.10/24 dev eth1 + - ip route add 172.16.1.0/24 via 172.16.2.1 
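+      # (Data-plane recap: the clients sit in 172.16.1.0/24 and 172.16.2.0/24
+      # behind srl1/srl2, which peer over 172.0.0.0/30; the static routes in
+      # srl1.cli and srl2.cli make the two LANs mutually reachable. A quick
+      # end-to-end check, assuming containerlab's clab-<lab-name>-<node>
+      # container naming:
+      #   docker exec -it clab-<lab-name>-client1 ping -c 3 172.16.2.10)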
links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] -- GitLab From 908b88b4da1ad7e533004d73c9ba56120fa1ad47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Ara=C3=BAjo?= Date: Mon, 11 Dec 2023 17:03:55 +0000 Subject: [PATCH 06/11] HPA in services HPA in webui service --- manifests/deviceservice.yaml | 35 ++++++++++++++++++++++++-------- manifests/monitoringservice.yaml | 35 ++++++++++++++++++++++++-------- manifests/webuiservice.yaml | 35 ++++++++++++++++++++++++++------ 3 files changed, 83 insertions(+), 22 deletions(-) diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index e49ba2399..bf599d0b4 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -70,11 +70,30 @@ spec: selector: app: deviceservice ports: - - name: grpc - protocol: TCP - port: 2020 - targetPort: 2020 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 2020 + targetPort: 2020 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: deviceservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: deviceservice + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml index 1540db0a1..4058436e5 100644 --- a/manifests/monitoringservice.yaml +++ b/manifests/monitoringservice.yaml @@ -65,11 +65,30 @@ spec: selector: app: monitoringservice ports: - - name: grpc - protocol: TCP - port: 7070 - targetPort: 7070 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 7070 + targetPort: 7070 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: monitoringservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: monitoringservice + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index a519aa4a2..58e1a65a0 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -111,9 +111,32 @@ spec: selector: app: webuiservice ports: - - name: webui - port: 8004 - targetPort: 8004 - - name: grafana - port: 3000 - targetPort: 3000 + - name: webui + port: 8004 + targetPort: 8004 + - name: grafana + port: 3000 + targetPort: 3000 +# TESTING +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: webuiservice-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: webuiservice + minReplicas: 1 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + #behavior: + # scaleDown: + # stabilizationWindowSeconds: 30 -- GitLab From 503a17bd49c8a2052915f2a6b59565ae7e2af241 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Ara=C3=BAjo?= Date: Wed, 22 Nov 2023 15:54:26 +0000 Subject: [PATCH 07/11] Added rate limiting to ingress controller --- manifests/nginx_ingress_http.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index 0892f0c9b..210848fa9 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -18,6 
+18,11 @@ metadata:
   name: tfs-ingress
   annotations:
     nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/limit-rps: '2'
+    nginx.ingress.kubernetes.io/limit-connections: '5'
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: '10'
+    nginx.ingress.kubernetes.io/proxy-send-timeout: '10'
+    nginx.ingress.kubernetes.io/proxy-read-timeout: '10'
 spec:
   rules:
   - http:
--
GitLab

From 14de559fbbbacad009f4d2688b53b4d1c5270847 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?David=20Ara=C3=BAjo?=
Date: Fri, 2 Feb 2024 13:52:55 +0000
Subject: [PATCH 08/11] NGINX and redeployall default variables set

code refactoring

NATS cluster complete

Startup Probe failing in NATS cluster mode

Cockroach cluster operator and NATS cluster mode

Update

Update scheduling policy for CRDB

NATS cluster mode

Testing CRDB cluster with node affinity

Revert "Testing dynamic node resources"

This reverts commit 856eb4799d2136697c721b387e6fca9fdcdbf5fd.

Testing dynamic node resources

NGINX and redeployall

Update my_deploy.sh

Update nginx_ingress_http.yaml

Redeploy all fixed

Add redeploy all feature
---
 deploy/all.sh                       |  14 ++++
 deploy/crdb.sh                      |  11 ++-
 deploy/nats.sh                      | 119 ++++++++++++++++++++++++++--
 deploy/qdb.sh                       |   6 +-
 manifests/cockroachdb/cluster.yaml  |  36 ++++-----
 manifests/cockroachdb/operator.yaml |   2 +
 manifests/nats/cluster.yaml         |  34 ++++++++
 manifests/nginx_ingress_http.yaml   |   4 +-
 my_deploy.sh                        |  12 ++-
 9 files changed, 204 insertions(+), 34 deletions(-)
 create mode 100644 manifests/nats/cluster.yaml

diff --git a/deploy/all.sh b/deploy/all.sh
index c169bc92c..204bbcfe2 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -18,6 +18,11 @@
 # Read deployment settings
 ########################################################################################################################

+# ----- Redeploy All ------------------------------------------------------------
+
+# If not already set, enables all components redeployment
+export REDEPLOYALL=${REDEPLOYALL:-""}
+

 # ----- TeraFlowSDN ------------------------------------------------------------

@@ -102,6 +107,15 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

+# TESTING
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
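The per-component flags compose with the global one; a clean CockroachDB-only
redeploy can be driven the same way (a sketch using only variables defined in
these scripts, not part of the patch itself):

    export CRDB_DEPLOY_MODE="cluster"  # or "single"
    export CRDB_REDEPLOY="YES"         # REDEPLOYALL="YES" would wipe every component
    ./deploy/crdb.sh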
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index c979ad4f2..6412d1316 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -18,6 +18,11 @@
 # Read deployment settings
 ########################################################################################################################

+# ----- Redeploy All ------------------------------------------------------------
+# If not already set, enables all components redeployment
+export REDEPLOYALL=${REDEPLOYALL:-""}
+
+
 # If not already set, set the namespace where CockroachDB will be deployed.
 export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}

@@ -223,7 +228,7 @@ function crdb_deploy_cluster() {
     kubectl create namespace ${CRDB_NAMESPACE}
     echo

-    echo "CockroachDB"
+    echo "CockroachDB (cluster-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> CockroachDB is present; skipping step."
@@ -360,7 +365,7 @@ function crdb_drop_database_cluster() {
 }

 if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
-    if [ "$CRDB_REDEPLOY" == "YES" ]; then
+    if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
         crdb_undeploy_single
     fi

@@ -370,7 +375,7 @@ if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
         crdb_drop_database_single
     fi
 elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
-    if [ "$CRDB_REDEPLOY" == "YES" ]; then
+    if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
         crdb_undeploy_cluster
     fi

diff --git a/deploy/nats.sh b/deploy/nats.sh
index 366270a69..9cc11ca8b 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -18,6 +18,10 @@
 # Read deployment settings
 ########################################################################################################################

+# ----- Redeploy All ------------------------------------------------------------
+# If not already set, enables all components redeployment
+export REDEPLOYALL=${REDEPLOYALL:-""}
+
 # If not already set, set the namespace where NATS will be deployed.
 export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}

@@ -27,16 +31,32 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

+# TESTING
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
export NATS_REDEPLOY=${NATS_REDEPLOY:-""} - ######################################################################################################################## # Automated steps start here ######################################################################################################################## +# Constants +TMP_FOLDER="./tmp" +NATS_MANIFESTS_PATH="manifests/nats" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests" +mkdir -p $TMP_MANIFESTS_FOLDER + function nats_deploy_single() { echo "NATS Namespace" echo ">>> Create NATS Namespace (if missing)" @@ -47,18 +67,85 @@ function nats_deploy_single() { helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/ echo + echo "Install NATS (single-node)" + echo ">>> Checking if NATS is deployed..." + if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then + echo ">>> NATS is present; skipping step." + else + echo ">>> Deploy NATS" + helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine + + echo ">>> Waiting NATS statefulset to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do + printf "%c" "." + sleep 1 + done + + # Wait for statefulset condition "Available=True" does not work + # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error: + # "error: readyReplicas is not found" + # Workaround: Check the pods are ready + #echo ">>> NATS statefulset created. Waiting for readiness condition..." + #kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats + #kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \ + # statefulset/nats + echo ">>> NATS statefulset created. Waiting NATS pods to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do + printf "%c" "." 
+ sleep 1 + done + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0 + fi + echo + + echo "NATS Port Mapping" + echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})" + NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') + PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})" + NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}') + PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo +} + + +function nats_deploy_cluster() { + echo "NATS Namespace" + echo ">>> Create NATS Namespace (if missing)" + kubectl create namespace ${NATS_NAMESPACE} + echo + + echo "Add NATS Helm Chart" + helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/ + echo + echo "Upgrade NATS Helm Chart" helm3 repo update nats echo - echo "Install NATS (single-node)" + echo "Install NATS (cluster-mode)" echo ">>> Checking if NATS is deployed..." if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then echo ">>> NATS is present; skipping step." else echo ">>> Deploy NATS" - helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine - + cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml" + helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml" + echo ">>> Waiting NATS statefulset to be created..." while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do printf "%c" "." @@ -78,7 +165,17 @@ function nats_deploy_single() { printf "%c" "." sleep 1 done + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do + printf "%c" "." + sleep 1 + done + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do + printf "%c" "." 
+ sleep 1 + done kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0 + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1 + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2 fi echo @@ -110,7 +207,7 @@ function nats_deploy_single() { echo } -function nats_undeploy_single() { +function nats_undeploy() { echo "NATS" echo ">>> Checking if NATS is deployed..." if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then @@ -127,8 +224,14 @@ function nats_undeploy_single() { echo } -if [ "$NATS_REDEPLOY" == "YES" ]; then - nats_undeploy_single +if [ "$NATS_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then + nats_undeploy fi -nats_deploy_single +if [ "$NATS_DEPLOY_MODE" == "single" ]; then + nats_deploy_single +elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then + nats_deploy_cluster +else + echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE" +fi \ No newline at end of file diff --git a/deploy/qdb.sh b/deploy/qdb.sh index acbcfd4f9..513ef9ae0 100755 --- a/deploy/qdb.sh +++ b/deploy/qdb.sh @@ -18,6 +18,10 @@ # Read deployment settings ######################################################################################################################## +# ----- Redeploy All ------------------------------------------------------------ +# If not already set, enables all components redeployment +export REDEPLOYALL=${REDEPLOYALL:-""} + # If not already set, set the namespace where QuestDB will be deployed. export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"} @@ -177,7 +181,7 @@ function qdb_drop_tables() { echo } -if [ "$QDB_REDEPLOY" == "YES" ]; then +if [ "$QDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then qdb_undeploy fi diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml index 4d9ef0f84..73875ca3f 100644 --- a/manifests/cockroachdb/cluster.yaml +++ b/manifests/cockroachdb/cluster.yaml @@ -33,14 +33,16 @@ spec: resources: requests: # This is intentionally low to make it work on local k3d clusters. - cpu: 4 - memory: 4Gi + # TESTING + cpu: 1 #4 + memory: 500Mi #4Gi limits: - cpu: 8 - memory: 8Gi + # TESTING + cpu: 1 #8 + memory: 1Gi #8Gi tlsEnabled: true -# You can set either a version of the db or a specific image name -# cockroachDBVersion: v22.2.8 + # You can set either a version of the db or a specific image name + # cockroachDBVersion: v22.2.8 image: name: cockroachdb/cockroach:v22.2.8 # nodes refers to the number of crdb pods that are created @@ -49,21 +51,17 @@ spec: additionalLabels: crdb: is-cool # affinity is a new API field that is behind a feature gate that is - # disabled by default. To enable please see the operator.yaml file. + # disabled by default. To enable please see the operator.yaml file. # The affinity field will accept any podSpec affinity rule. 
- # affinity: - # podAntiAffinity: - # preferredDuringSchedulingIgnoredDuringExecution: - # - weight: 100 - # podAffinityTerm: - # labelSelector: - # matchExpressions: - # - key: app.kubernetes.io/instance - # operator: In - # values: - # - cockroachdb - # topologyKey: kubernetes.io/hostname + # TESTING: Force one pod per node, if possible + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/instance: cockroachdb # nodeSelectors used to match against # nodeSelector: diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml index 59d515061..0d578410c 100644 --- a/manifests/cockroachdb/operator.yaml +++ b/manifests/cockroachdb/operator.yaml @@ -381,6 +381,8 @@ spec: spec: containers: - args: + # TESTING + - -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true - -zap-log-level - info env: diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml new file mode 100644 index 000000000..39e41958f --- /dev/null +++ b/manifests/nats/cluster.yaml @@ -0,0 +1,34 @@ +container: + image: + tags: 2.9-alpine + env: + # different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB + # should be ~90% of memory limit + GOMEMLIMIT: 400MiB + merge: + # recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters + resources: + requests: + cpu: 1 # 2 + memory: 500Mi # 4Gi + limits: + cpu: 1 # 4 + memory: 1Gi # 8Gi + +config: + cluster: + enabled: true + replicas: 3 + jetstream: + enabled: true + fileStore: + pvc: + size: 4Gi + +# Force one pod per node, if possible +podTemplate: + topologySpreadConstraints: + kubernetes.io/hostname: + maxSkew: 1 + whenUnsatisfiable: ScheduleAnyway + \ No newline at end of file diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index 210848fa9..cb400ee7d 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -18,8 +18,8 @@ metadata: name: tfs-ingress annotations: nginx.ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/limit-rps: '2' - nginx.ingress.kubernetes.io/limit-connections: '5' + nginx.ingress.kubernetes.io/limit-rps: '5' + nginx.ingress.kubernetes.io/limit-connections: '10' nginx.ingress.kubernetes.io/proxy-connect-timeout: '10' nginx.ingress.kubernetes.io/proxy-send-timeout: '10' nginx.ingress.kubernetes.io/proxy-read-timeout: '10' diff --git a/my_deploy.sh b/my_deploy.sh index 8417f6eae..0b7a259de 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -13,6 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +# ----- Redeploy All ------------------------------------------------------------ + +# If not already set, enables all components redeployment +export REDEPLOYALL="" + # ----- TeraFlowSDN ------------------------------------------------------------ @@ -103,7 +108,7 @@ export CRDB_DATABASE="tfs" # Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. # See ./deploy/all.sh or ./deploy/crdb.sh for additional details -export CRDB_DEPLOY_MODE="single" +export CRDB_DEPLOY_MODE="cluster" # Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" @@ -123,6 +128,11 @@ export NATS_EXT_PORT_CLIENT="4222" # Set the external port NATS HTTP Mgmt GUI interface will be exposed to. 
export NATS_EXT_PORT_HTTP="8222"

+# TESTING
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
 # Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
--
GitLab

From f9f0d2823708c7f18c76c503ea0bd792800493d2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?David=20Ara=C3=BAjo?=
Date: Wed, 31 Jan 2024 16:29:09 +0000
Subject: [PATCH 09/11] CRDB and NATS cluster mode

Restore default values
---
 deploy/all.sh                       |  1 -
 deploy/nats.sh                      |  4 ++--
 manifests/cockroachdb/cluster.yaml  | 11 ++++-------
 manifests/cockroachdb/operator.yaml |  1 -
 manifests/nats/cluster.yaml         |  8 ++++----
 manifests/webuiservice.yaml         |  1 -
 my_deploy.sh                        |  3 +--
 7 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/deploy/all.sh b/deploy/all.sh
index 204bbcfe2..c5d423a2f 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -107,7 +107,6 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

-# TESTING
 # If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
 # - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
diff --git a/deploy/nats.sh b/deploy/nats.sh
index 9cc11ca8b..57fda629c 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -31,7 +31,6 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}

-# TESTING
 # If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
 # - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
@@ -73,7 +72,8 @@ function nats_deploy_single() {
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true
+

         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml
index 73875ca3f..bcb0c7049 100644
--- a/manifests/cockroachdb/cluster.yaml
+++ b/manifests/cockroachdb/cluster.yaml
@@ -33,13 +33,11 @@ spec:
   resources:
     requests:
       # This is intentionally low to make it work on local k3d clusters.
-      # TESTING
-      cpu: 1 #4
-      memory: 500Mi #4Gi
+      cpu: 4
+      memory: 4Gi
     limits:
-      # TESTING
-      cpu: 1 #8
-      memory: 1Gi #8Gi
+      cpu: 8
+      memory: 8Gi
   tlsEnabled: true
   # You can set either a version of the db or a specific image name
   # cockroachDBVersion: v22.2.8
@@ -54,7 +52,6 @@ spec:
     # disabled by default. To enable please see the operator.yaml file.
     # The affinity field will accept any podSpec affinity rule.
- # TESTING: Force one pod per node, if possible topologySpreadConstraints: - maxSkew: 1 topologyKey: kubernetes.io/hostname diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml index 0d578410c..d8e691308 100644 --- a/manifests/cockroachdb/operator.yaml +++ b/manifests/cockroachdb/operator.yaml @@ -381,7 +381,6 @@ spec: spec: containers: - args: - # TESTING - -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true - -zap-log-level - info diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml index 39e41958f..491c86628 100644 --- a/manifests/nats/cluster.yaml +++ b/manifests/nats/cluster.yaml @@ -9,11 +9,11 @@ container: # recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters resources: requests: - cpu: 1 # 2 - memory: 500Mi # 4Gi + cpu: 1 + memory: 500Mi limits: - cpu: 1 # 4 - memory: 1Gi # 8Gi + cpu: 1 + memory: 1Gi config: cluster: diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index 58e1a65a0..132839edd 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -117,7 +117,6 @@ spec: - name: grafana port: 3000 targetPort: 3000 -# TESTING --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler diff --git a/my_deploy.sh b/my_deploy.sh index 0b7a259de..991c21e71 100755 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -108,7 +108,7 @@ export CRDB_DATABASE="tfs" # Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. # See ./deploy/all.sh or ./deploy/crdb.sh for additional details -export CRDB_DEPLOY_MODE="cluster" +export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. export CRDB_DROP_DATABASE_IF_EXISTS="" @@ -128,7 +128,6 @@ export NATS_EXT_PORT_CLIENT="4222" # Set the external port NATS HTTP Mgmt GUI interface will be exposed to. export NATS_EXT_PORT_HTTP="8222" -# TESTING # Set NATS installation mode to 'single'. This option is convenient for development and testing. 
 # See ./deploy/all.sh or ./deploy/nats.sh for additional details
 export NATS_DEPLOY_MODE="single"
-- 
GitLab


From 610f10e79f4d22d927239994e916ace3d61f8b9a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?David=20Ara=C3=BAjo?=
Date: Wed, 24 Apr 2024 15:46:39 +0100
Subject: [PATCH 10/11] Containerlab scenario fixed

---
 hackfest/containerlab/srl1.cli              | 14 ++++++++++++++
 hackfest/containerlab/srl2.cli              | 14 ++++++++++++++
 hackfest/containerlab/tfs-scenario.clab.yml | 12 +++++++++---
 3 files changed, 37 insertions(+), 3 deletions(-)
 create mode 100644 hackfest/containerlab/srl1.cli
 create mode 100644 hackfest/containerlab/srl2.cli

diff --git a/hackfest/containerlab/srl1.cli b/hackfest/containerlab/srl1.cli
new file mode 100644
index 000000000..fd7144ca7
--- /dev/null
+++ b/hackfest/containerlab/srl1.cli
@@ -0,0 +1,14 @@
+set / interface ethernet-1/2 admin-state enable
+set / interface ethernet-1/2 subinterface 0 admin-state enable
+set / interface ethernet-1/2 subinterface 0 ipv4 address 172.16.1.1/24
+
+set / interface ethernet-1/1 admin-state enable
+set / interface ethernet-1/1 subinterface 0 admin-state enable
+set / interface ethernet-1/1 subinterface 0 ipv4 address 172.0.0.1/30
+
+set / network-instance default
+set / network-instance default interface ethernet-1/1.0
+set / network-instance default interface ethernet-1/2.0
+
+set / network-instance default next-hop-groups group group1 nexthop 1 ip-address 172.0.0.2 admin-state enable
+set / network-instance default static-routes route 172.16.2.0/24 next-hop-group group1 admin-state enable
\ No newline at end of file
diff --git a/hackfest/containerlab/srl2.cli b/hackfest/containerlab/srl2.cli
new file mode 100644
index 000000000..395d53c71
--- /dev/null
+++ b/hackfest/containerlab/srl2.cli
@@ -0,0 +1,14 @@
+set / interface ethernet-1/2 admin-state enable
+set / interface ethernet-1/2 subinterface 0 admin-state enable
+set / interface ethernet-1/2 subinterface 0 ipv4 address 172.16.2.1/24
+
+set / interface ethernet-1/1 admin-state enable
+set / interface ethernet-1/1 subinterface 0 admin-state enable
+set / interface ethernet-1/1 subinterface 0 ipv4 address 172.0.0.2/30
+
+set / network-instance default
+set / network-instance default interface ethernet-1/1.0
+set / network-instance default interface ethernet-1/2.0
+
+set /network-instance default next-hop-groups group group1 nexthop 1 ip-address 172.0.0.1 admin-state enable
+set /network-instance default static-routes route 172.16.1.0/24 next-hop-group group1 admin-state enable
\ No newline at end of file
diff --git a/hackfest/containerlab/tfs-scenario.clab.yml b/hackfest/containerlab/tfs-scenario.clab.yml
index f79378757..91467d2b9 100644
--- a/hackfest/containerlab/tfs-scenario.clab.yml
+++ b/hackfest/containerlab/tfs-scenario.clab.yml
@@ -24,7 +24,7 @@ mgmt:
 topology:
   kinds:
     srl:
-      image: ghcr.io/nokia/srlinux:23.3.1
+      image: ghcr.io/nokia/srlinux:21.11.3
     linux:
       image: ghcr.io/hellt/network-multitool
   nodes:
@@ -34,24 +34,30 @@ topology:
     srl1:
       kind: srl
      type: ixr6
       cpu: 0.5
       memory: 1GB
       mgmt-ipv4: 172.100.100.101
-      #startup-config: srl1.cli
+      startup-config: ./srl1.cli
     srl2:
       kind: srl
       type: ixr6
       cpu: 0.5
       memory: 1GB
       mgmt-ipv4: 172.100.100.102
-      #startup-config: srl2.cli
+      startup-config: ./srl2.cli
     client1:
       kind: linux
       cpu: 0.1
       memory: 100MB
       mgmt-ipv4: 172.100.100.201
+      exec:
+        - ip address add 172.16.1.10/24 dev eth1
+        - ip route add 172.16.2.0/24 via 172.16.1.1
     client2:
       kind: linux
       cpu: 0.1
       memory: 100MB
       mgmt-ipv4: 172.100.100.202
+      exec:
+        - ip address add 172.16.2.10/24 dev eth1
+        - ip route add 172.16.1.0/24 via 172.16.2.1
 links:
   - endpoints: ["srl1:e1-1", "srl2:e1-1"]
-- 
GitLab


From 220e0fd5f6f2f7a64ea2ebf58f2876d295fc1e13 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?David=20Ara=C3=BAjo?=
Date: Mon, 29 Apr 2024 16:30:27 +0100
Subject: [PATCH 11/11] Revert "Containerlab scenario fixed"

This reverts commit 610f10e79f4d22d927239994e916ace3d61f8b9a.
---
 hackfest/containerlab/srl1.cli              | 14 --------------
 hackfest/containerlab/srl2.cli              | 14 --------------
 hackfest/containerlab/tfs-scenario.clab.yml | 12 +++---------
 3 files changed, 3 insertions(+), 37 deletions(-)
 delete mode 100644 hackfest/containerlab/srl1.cli
 delete mode 100644 hackfest/containerlab/srl2.cli

diff --git a/hackfest/containerlab/srl1.cli b/hackfest/containerlab/srl1.cli
deleted file mode 100644
index fd7144ca7..000000000
--- a/hackfest/containerlab/srl1.cli
+++ /dev/null
@@ -1,14 +0,0 @@
-set / interface ethernet-1/2 admin-state enable
-set / interface ethernet-1/2 subinterface 0 admin-state enable
-set / interface ethernet-1/2 subinterface 0 ipv4 address 172.16.1.1/24
-
-set / interface ethernet-1/1 admin-state enable
-set / interface ethernet-1/1 subinterface 0 admin-state enable
-set / interface ethernet-1/1 subinterface 0 ipv4 address 172.0.0.1/30
-
-set / network-instance default
-set / network-instance default interface ethernet-1/1.0
-set / network-instance default interface ethernet-1/2.0
-
-set / network-instance default next-hop-groups group group1 nexthop 1 ip-address 172.0.0.2 admin-state enable
-set / network-instance default static-routes route 172.16.2.0/24 next-hop-group group1 admin-state enable
\ No newline at end of file
diff --git a/hackfest/containerlab/srl2.cli b/hackfest/containerlab/srl2.cli
deleted file mode 100644
index 395d53c71..000000000
--- a/hackfest/containerlab/srl2.cli
+++ /dev/null
@@ -1,14 +0,0 @@
-set / interface ethernet-1/2 admin-state enable
-set / interface ethernet-1/2 subinterface 0 admin-state enable
-set / interface ethernet-1/2 subinterface 0 ipv4 address 172.16.2.1/24
-
-set / interface ethernet-1/1 admin-state enable
-set / interface ethernet-1/1 subinterface 0 admin-state enable
-set / interface ethernet-1/1 subinterface 0 ipv4 address 172.0.0.2/30
-
-set / network-instance default
-set / network-instance default interface ethernet-1/1.0
-set / network-instance default interface ethernet-1/2.0
-
-set /network-instance default next-hop-groups group group1 nexthop 1 ip-address 172.0.0.1 admin-state enable
-set /network-instance default static-routes route 172.16.1.0/24 next-hop-group group1 admin-state enable
\ No newline at end of file
diff --git a/hackfest/containerlab/tfs-scenario.clab.yml b/hackfest/containerlab/tfs-scenario.clab.yml
index 91467d2b9..f79378757 100644
--- a/hackfest/containerlab/tfs-scenario.clab.yml
+++ b/hackfest/containerlab/tfs-scenario.clab.yml
@@ -24,7 +24,7 @@ mgmt:
 topology:
   kinds:
     srl:
-      image: ghcr.io/nokia/srlinux:21.11.3
+      image: ghcr.io/nokia/srlinux:23.3.1
     linux:
       image: ghcr.io/hellt/network-multitool
   nodes:
@@ -34,30 +34,24 @@ topology:
     srl1:
       kind: srl
       type: ixr6
       cpu: 0.5
       memory: 1GB
       mgmt-ipv4: 172.100.100.101
-      startup-config: ./srl1.cli
+      #startup-config: srl1.cli
     srl2:
       kind: srl
       type: ixr6
       cpu: 0.5
       memory: 1GB
       mgmt-ipv4: 172.100.100.102
-      startup-config: ./srl2.cli
+      #startup-config: srl2.cli
     client1:
       kind: linux
       cpu: 0.1
       memory: 100MB
       mgmt-ipv4: 172.100.100.201
-      exec:
-        - ip address add 172.16.1.10/24 dev eth1
-        - ip route add 172.16.2.0/24 via 172.16.1.1
     client2:
       kind: linux
       cpu: 0.1
       memory: 100MB
       mgmt-ipv4: 172.100.100.202
-      exec:
-        - ip address add 172.16.2.10/24 dev eth1
-        - ip route add 172.16.1.0/24 via 172.16.2.1
 links:
   - endpoints: ["srl1:e1-1", "srl2:e1-1"]
-- 
GitLab
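
A minimal sketch of how the pieces touched by this series might be exercised
once the patches are applied. It is illustrative only: running from the
repository root, the namespace values "nats" and "tfs", and the use of
NATS_REDEPLOY="YES" are assumptions layered on top of what the patches
themselves define, not values the series enforces.

    # ASSUMPTION: executed from the root of a TeraFlowSDN checkout.
    # Deploy with NATS in cluster mode instead of the default 'single' mode
    # (see deploy/nats.sh); NATS_REDEPLOY drops any existing message broker.
    export NATS_DEPLOY_MODE="cluster"
    export NATS_REDEPLOY="YES"
    ./deploy/all.sh

    # The NATS StatefulSet should report all replicas ready once the cluster
    # converges ("nats" is the assumed value of NATS_NAMESPACE).
    kubectl get statefulset --namespace nats

    # The HorizontalPodAutoscalers should list current vs. target CPU
    # utilization ("tfs" is the assumed namespace of the TFS services).
    kubectl get hpa --namespace tfs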