diff --git a/deploy/all.sh b/deploy/all.sh
index c169bc92c0d9a6dea87de919ad20b4cf3afc1199..cedbb5b8bfc7ef363c1d60a9d8f2b6cef63be384 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -27,7 +27,44 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
 # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"}
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate BGP-LS Speaker
+#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
+
+# Uncomment to activate Optical Controller
+#   To manage optical connections, "service" requires "opticalcontroller" to be deployed
+#   before "service". Thus, we "hack" the TFS_COMPONENTS environment variable by prepending
+#   "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
+#fi
+
+# Uncomment to activate ZTP
+#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
+
+# Uncomment to activate Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
+# Uncomment to activate Forecaster
+#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
+
+# Uncomment to activate E2E Orchestrator
+#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -67,8 +104,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
 # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
-# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
-# checking/deploying CockroachDB.
 # - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
 # - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
@@ -80,7 +115,7 @@ export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
 
 # If not already set, disable flag for dropping database, if it exists.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
-# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
+# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while
 # checking/deploying CockroachDB.
 export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""}
 
@@ -102,6 +137,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
 
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -137,7 +180,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 # If not already set, disable flag for dropping tables if they exist.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
 # If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
-# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while 
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while
 # checking/deploying QuestDB.
 export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index c979ad4f2c18861c6a93b6b04e5d8e3e71aae41e..3e80b6350e66ec30a725c45acb7cf954ac3009c8 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -37,8 +37,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
 # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
-# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
-# checking/deploying CockroachDB.
 # - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
 # - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
@@ -48,7 +46,7 @@ export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 #   Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html
 export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
 
-# If not already set, disable flag for dropping database if exists.
+# If not already set, disable flag for dropping database, if it exists.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
 # If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while
 # checking/deploying CockroachDB.
@@ -79,7 +77,7 @@ function crdb_deploy_single() {
     kubectl create namespace ${CRDB_NAMESPACE}
     echo
 
-    echo "CockroachDB (single-node)"
+    echo "CockroachDB (single-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> CockroachDB is present; skipping step."
@@ -139,7 +137,7 @@ function crdb_deploy_single() {
 }
 
 function crdb_undeploy_single() {
-    echo "CockroachDB"
+    echo "CockroachDB (single-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> Undeploy CockroachDB"
@@ -223,7 +221,7 @@ function crdb_deploy_cluster() {
     kubectl create namespace ${CRDB_NAMESPACE}
     echo
 
-    echo "CockroachDB"
+    echo "CockroachDB (cluster-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> CockroachDB is present; skipping step."
@@ -319,7 +317,7 @@ function crdb_undeploy_cluster() {
     fi
     echo
 
-    echo "CockroachDB"
+    echo "CockroachDB (cluster-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> Undeploy CockroachDB"
diff --git a/deploy/nats.sh b/deploy/nats.sh
index 366270a6915a1eef969846446ecc9152c3fa9531..e9cef883ee7b909255d44551919771ebc49f524b 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -27,6 +27,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
 
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -37,6 +45,14 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
 # Automated steps start here
 ########################################################################################################################
 
+# Constants
+TMP_FOLDER="./tmp"
+NATS_MANIFESTS_PATH="manifests/nats"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+
 function nats_deploy_single() {
     echo "NATS Namespace"
     echo ">>> Create NATS Namespace (if missing)"
@@ -47,18 +63,86 @@ function nats_deploy_single() {
     helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
     echo
 
+    echo "Install NATS (single-mode)"
+    echo ">>> Checking if NATS is deployed..."
+    if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
+        echo ">>> NATS is present; skipping step."
+    else
+        echo ">>> Deploy NATS"
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true
+
+
+        echo ">>> Waiting NATS statefulset to be created..."
+        while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+
+        # Waiting for statefulset condition "Available=True" does not work.
+        # Waiting for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
+        #   "error: readyReplicas is not found"
+        # Workaround: check that the pods are ready.
+        #echo ">>> NATS statefulset created. Waiting for readiness condition..."
+        #kubectl wait --namespace  ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats
+        #kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
+        #    statefulset/nats
+        echo ">>> NATS statefulset created. Waiting NATS pods to be created..."
+        while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
+    fi
+    echo
+
+    echo "NATS Port Mapping"
+    echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
+    NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+    PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})"
+    NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}')
+    PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+
+function nats_deploy_cluster() {
+    echo "NATS Namespace"
+    echo ">>> Create NATS Namespace (if missing)"
+    kubectl create namespace ${NATS_NAMESPACE}
+    echo
+
+    echo "Add NATS Helm Chart"
+    helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
+    echo
+
     echo "Upgrade NATS Helm Chart"
     helm3 repo update nats
     echo
 
-    echo "Install NATS (single-node)"
+    echo "Install NATS (cluster-mode)"
     echo ">>> Checking if NATS is deployed..."
     if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
-
+        cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
+
         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
             printf "%c" "."
@@ -78,7 +162,17 @@ function nats_deploy_single() {
             printf "%c" "."
             sleep 1
         done
+        while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
         kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
+        kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1
+        kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2
     fi
     echo
 
@@ -110,7 +204,7 @@ function nats_deploy_single() {
     echo
 }
 
-function nats_undeploy_single() {
+function nats_undeploy() {
     echo "NATS"
     echo ">>> Checking if NATS is deployed..."
     if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
@@ -128,7 +222,13 @@ function nats_undeploy_single() {
 }
 
 if [ "$NATS_REDEPLOY" == "YES" ]; then
-    nats_undeploy_single
+    nats_undeploy
 fi
 
-nats_deploy_single
+if [ "$NATS_DEPLOY_MODE" == "single" ]; then
+    nats_deploy_single
+elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then
+    nats_deploy_cluster
+else
+    echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE"
+fi
\ No newline at end of file
diff --git a/deploy/qdb.sh b/deploy/qdb.sh
index acbcfd4f96ccbd2b09d5d82f66a1bf801a710780..ebb75dce9ad3007145a5129df3a4037a9392e875 100755
--- a/deploy/qdb.sh
+++ b/deploy/qdb.sh
@@ -44,7 +44,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
 # If not already set, disable flag for dropping tables if they exist.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variables
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
 # QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped
 # while checking/deploying QuestDB.
 export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 3fdbe77fb502c42aaf7dd507ab239f6b3bb20056..04895f98448694c4eca3861079df09c366b0000f 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -204,6 +204,14 @@ if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
     DOCKER_BUILD="docker buildx build"
 fi
 
+LINKERD_STATUS="$(microk8s status -a linkerd)"
+if [[ $LINKERD_STATUS =~ "enabled" ]]; then
+    echo "LinkerD installed: workloads will be injected"
+else
+    echo "LinkerD not installed"
+fi
+printf "\n"
+
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Processing '$COMPONENT' component..."
 
@@ -279,8 +287,11 @@ for COMPONENT in $TFS_COMPONENTS; do
 
     echo "  Adapting '$COMPONENT' manifest file..."
     MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
-    # cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
-    cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
+    if [[ $LINKERD_STATUS =~ "enabled" ]]; then
+        cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
+    else
+        cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    fi
 
     if [ "$COMPONENT" == "pathcomp" ]; then
         IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml
index 4d9ef0f844b5ffb02753b6cc7a7be7d03928896c..bcb0c704948ecdbd8b271b68e685c481e669594b 100644
--- a/manifests/cockroachdb/cluster.yaml
+++ b/manifests/cockroachdb/cluster.yaml
@@ -39,8 +39,8 @@ spec:
       cpu: 8
       memory: 8Gi
   tlsEnabled: true
-# You can set either a version of the db or a specific image name
-# cockroachDBVersion: v22.2.8
+  # You can set either a version of the db or a specific image name
+  # cockroachDBVersion: v22.2.8
   image:
     name: cockroachdb/cockroach:v22.2.8
   # nodes refers to the number of crdb pods that are created
@@ -49,21 +49,16 @@ spec:
   additionalLabels:
     crdb: is-cool
   # affinity is a new API field that is behind a feature gate that is
-  # disabled by default.  To enable please see the operator.yaml file.
+  # disabled by default. To enable please see the operator.yaml file.
 
   # The affinity field will accept any podSpec affinity rule.
-  # affinity:
-  #   podAntiAffinity:
-  #      preferredDuringSchedulingIgnoredDuringExecution:
-  #      - weight: 100
-  #        podAffinityTerm:
-  #          labelSelector:
-  #            matchExpressions:
-  #            - key: app.kubernetes.io/instance
-  #              operator: In
-  #              values:
-  #              - cockroachdb
-  #          topologyKey: kubernetes.io/hostname
+  topologySpreadConstraints:
+  - maxSkew: 1
+    topologyKey: kubernetes.io/hostname
+    whenUnsatisfiable: ScheduleAnyway
+    labelSelector:
+      matchLabels:
+        app.kubernetes.io/instance: cockroachdb
 
   # nodeSelectors used to match against
   # nodeSelector:
diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml
index 59d515061c4c0f253523aab803653b3f33007461..d8e691308e4cc16af3f545d87244281ab0730696 100644
--- a/manifests/cockroachdb/operator.yaml
+++ b/manifests/cockroachdb/operator.yaml
@@ -381,6 +381,7 @@ spec:
     spec:
       containers:
       - args:
+        - -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true
         - -zap-log-level
         - info
         env:
diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..00dbef17fca74ca906d4f97ee6e8751c03ef493f
--- /dev/null
+++ b/manifests/nats/cluster.yaml
@@ -0,0 +1,47 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+container:
+  image:
+    tags: 2.9-alpine
+  env:
+    # different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB
+    # should be ~90% of memory limit
+    GOMEMLIMIT: 400MiB
+  merge:
+    # recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters
+    resources:
+      requests:
+        cpu: 1
+        memory: 500Mi
+      limits:
+        cpu: 1
+        memory: 1Gi
+
+config:
+  cluster:
+    enabled: true
+    replicas: 3
+  jetstream:
+    enabled: true
+    fileStore:
+      pvc:
+        size: 4Gi
+
+# Force one pod per node, if possible
+podTemplate:
+  topologySpreadConstraints:
+    kubernetes.io/hostname:
+      maxSkew: 1
+      whenUnsatisfiable: ScheduleAnyway
diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml
index 0892f0c9b790b936df5540ac5fe1aed0270b91a5..955d5726a9f8f79560327a8f595c1865f6d37d22 100644
--- a/manifests/nginx_ingress_http.yaml
+++ b/manifests/nginx_ingress_http.yaml
@@ -18,6 +18,11 @@ metadata:
   name: tfs-ingress
   annotations:
     nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/limit-rps: "50"
+    nginx.ingress.kubernetes.io/limit-connections: "50"
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "50"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "50"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "50"
 spec:
   rules:
     - http:
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index a519aa4a2f8a1e81f1b7f2a1be1965ec0b8bb386..19317323f2a60293a33d740b28b3795627846642 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -117,3 +117,25 @@ spec:
     - name: grafana
       port: 3000
       targetPort: 3000
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: webuiservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: webuiservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 50
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/my_deploy.sh b/my_deploy.sh
index 8417f6eae510391e65d5f91202e59cccf32e1f98..6007a7ff971231c6c135dfad7b9385187f028421 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -123,6 +123,10 @@ export NATS_EXT_PORT_CLIENT="4222"
 # Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP="8222"
 
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
 # Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
 
diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md b/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md
index 88f7a7bd5c7a268857a7a4ec2642c388daf715d3..f4faae268f75f96223b4c74571de695fada11497 100644
--- a/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md
+++ b/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md
@@ -1,4 +1,4 @@
-<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md b/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md
index 9b0c48c8ed24fe8ca5c06f118b3d440653c686e5..c7e975e864b042a1a4190f6090d5ed2ccee8ebf0 100644
--- a/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md
+++ b/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md
@@ -1,4 +1,4 @@
-<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md b/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md
index 452050b65106b8393ac8a7df98ea472b7705e608..ac9143d153d48d713210662249ffc15b833b4c83 100644
--- a/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md
+++ b/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md
@@ -1,4 +1,4 @@
-<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/src/device/requirements.in b/src/device/requirements.in
index 73ea741d16dcdafd7a9be87ad79b457ccb6c5d5e..6f20b0de1c62eee000a22244f38b2ab0fd4aefd5 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -23,7 +23,7 @@ Flask==2.1.3
 Flask-HTTPAuth==4.5.0
 Flask-RESTful==0.3.9
 Jinja2==3.0.3
-ncclient==0.6.13
+ncclient==0.6.15
 p4runtime==1.3.0
 pandas==1.5.*
 paramiko==2.9.2
diff --git a/src/dlt/gateway/settings.gradle.kts b/src/dlt/gateway/settings.gradle.kts
index 77fa0f0b22918cf306f0e5f07506a35e492142b4..6500a488a10c31fba79da633993989e5a7e7ec40 100644
--- a/src/dlt/gateway/settings.gradle.kts
+++ b/src/dlt/gateway/settings.gradle.kts
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ * Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 66e188465994a47f173dcca93237b46cd86adb16..c154346204a4ad59eec54a7e9ae3956a7f3db655 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -156,7 +156,7 @@
           <div class="container">
             <div class="row">
               <div class="col-md-12">
-                <p class="text-center" style="color: white;">&copy; 2022-2023 <a href="https://tfs.etsi.org/">ETSI TeraFlowSDN (TFS) OSG</a></p>
+                <p class="text-center" style="color: white;">&copy; 2022-2024 <a href="https://tfs.etsi.org/">ETSI OSG/SDG TeraFlowSDN (TFS)</a></p>
               </div>
             </div>
             <div class="row">