diff --git a/deploy/mock_blockchain.sh b/deploy/mock_blockchain.sh
index ef7811c87eabdcb7cf95db2e4cf1a6eee52ef6ca..74d62cd526a38298c8f197fedf0c0169dbb8efca 100755
--- a/deploy/mock_blockchain.sh
+++ b/deploy/mock_blockchain.sh
@@ -38,20 +38,21 @@ GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/manifests"
 mkdir -p $TMP_MANIFESTS_FOLDER
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+TMP_LOGS_FOLDER="${TMP_FOLDER}/${K8S_NAMESPACE}/logs"
 mkdir -p $TMP_LOGS_FOLDER
 
 echo "Deleting and Creating a new namespace..."
-kubectl delete namespace $K8S_NAMESPACE
+kubectl delete namespace $K8S_NAMESPACE --ignore-not-found
 kubectl create namespace $K8S_NAMESPACE
 printf "\n"
 
 echo "Deploying components and collecting environment variables..."
 ENV_VARS_SCRIPT=tfs_bchain_runtime_env_vars.sh
-echo "# Environment variables for TeraFlow Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
+echo "# Environment variables for TeraFlowSDN Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
 PYTHONPATH=$(pwd)/src
+echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
 
 echo "Processing '$COMPONENT' component..."
 IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
@@ -77,12 +78,12 @@ cp ./manifests/"${COMPONENT}".yaml "$MANIFEST"
 
 if [ -n "$REGISTRY_IMAGE" ]; then
     # Registry is set
-    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
     sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
     sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
 else
     # Registry is not set
-    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
     sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
     sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
 fi
@@ -91,8 +92,8 @@ echo "  Deploying '$COMPONENT' component to Kubernetes..."
 DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
 kubectl --namespace $K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
 COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+#kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
 
 echo "  Collecting env-vars for '$COMPONENT' component..."
 SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME} --namespace $K8S_NAMESPACE -o json)
diff --git a/src/tests/scenario2/Scenario.md b/src/tests/scenario2/Scenario.md
index 8dad4691ade669522b5c82a5e4ed07e5d0279492..964c8b2d089cd9316dd5d7f8061712160408bc48 100644
--- a/src/tests/scenario2/Scenario.md
+++ b/src/tests/scenario2/Scenario.md
@@ -1,47 +1,64 @@
-# Scenario:
-
-- 4 TFS instances
-
-    - domain D1 (source for e-2-e service)
-        5 routers + 1 DC
-        R1@D1/2 <--> R2@D1/1
-        R2@D1/3 <--> R3@D1/2
-        R2@D1/5 <--> R5@D1/2
-        R3@D1/4 <--> R4@D1/3
-        R4@D1/5 <--> R5@D1/4
-        R5@D1/1 <--> R1@D1/5
-        R1@D1/100 <--> DCGW@D1/eth1
-
-    - domain D2 (transit for e-2-e service)
-        6 routers
-        R1@D2/2 <--> R2@D2/1
-        R1@D2/6 <--> R6@D2/1
-        R1@D2/5 <--> R5@D2/1
-        R2@D2/3 <--> R3@D2/2
-        R2@D2/4 <--> R4@D2/2
-        R2@D2/5 <--> R5@D2/2
-        R2@D2/6 <--> R6@D2/2
-        R3@D2/6 <--> R6@D2/3
-        R4@D2/5 <--> R5@D2/4
-
-    - domain D3 (transit for e-2-e service)
-        4 routers
-        R1@D3/2 <--> R2@D3/1
-        R2@D3/3 <--> R3@D3/2
-        R3@D3/4 <--> R4@D3/3
-        R4@D3/1 <--> R1@D3/4
-        R2@D3/4 <--> R4@D3/2
-
-    - domain D4 (end for e-2-e service)
-        3 routers
-        R1@D4/2 <--> R2@D4/1
-        R1@D4/3 <--> R3@D4/1
-        R2@D4/3 <--> R3@D4/2
-        R3@D4/100 <--> DCGW@D4/eth1
-
-    - interdomain links
-        R4@D1/10 <--> R1@D2/10
-        R5@D1/10 <--> R1@D3/10
-        R4@D2/10 <--> R2@D4/10
-        R5@D2/10 <--> R2@D3/10
-        R3@D3/10 <--> R1@D4/10
+# Scenario Description
+
+This scenario is composed of 4 TeraFlowSDN instances.
+Each instance has its own local network topology, detailed below.
+In addition, each instance exposes an abstracted view of its local network domain.
+Finally, the different domains are interconnected through the inter-domain links detailed below.
+
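+In the connectivity lists below, an entry such as `R4@D1/10 <--> R1@D2/10` denotes a
+bidirectional link between port 10 of router R4 in domain D1 and port 10 of router R1 in
+domain D2; similarly, `DCGW@D1/eth1` refers to interface eth1 of the DataCenter gateway of
+domain D1.
+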
+## Domain D1 (end for the end-to-end interdomain slice)
+
+Domain D1 is composed of 5 emulated packet routers (Rx@D1) and 1 emulated DataCenter (DCGW@D1).
+The DCGW@D1 is a termination endpoint for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D1/2 <--> R2@D1/1
+- R2@D1/3 <--> R3@D1/2
+- R2@D1/5 <--> R5@D1/2
+- R3@D1/4 <--> R4@D1/3
+- R4@D1/5 <--> R5@D1/4
+- R5@D1/1 <--> R1@D1/5
+- R1@D1/100 <--> DCGW@D1/eth1
+
+## Domain D2 (transit for the end-to-end interdomain slice)
+
+Domain D2 is composed of 6 emulated packet routers (Rx@D2).
+This domain behaves as a transit domain for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D2/2 <--> R2@D2/1
+- R1@D2/6 <--> R6@D2/1
+- R1@D2/5 <--> R5@D2/1
+- R2@D2/3 <--> R3@D2/2
+- R2@D2/4 <--> R4@D2/2
+- R2@D2/5 <--> R5@D2/2
+- R2@D2/6 <--> R6@D2/2
+- R3@D2/6 <--> R6@D2/3
+- R4@D2/5 <--> R5@D2/4
+
+## Domain D3 (transit for the end-to-end interdomain slice)
+
+Domain D3 is composed of 4 emulated packet routers (Rx@D3).
+This domain behaves as a transit domain for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D3/2 <--> R2@D3/1
+- R2@D3/3 <--> R3@D3/2
+- R3@D3/4 <--> R4@D3/3
+- R4@D3/1 <--> R1@D3/4
+- R2@D3/4 <--> R4@D3/2
+
+## Domain D4 (end for the end-to-end interdomain slice)
+
+Domain D4 is composed of 3 emulated packet routers (Rx@D4) and 1 emulated DataCenter (DCGW@D4).
+The DCGW@D4 is a termination endpoint for the end-to-end interdomain slice.
+The internal domain connectivity is defined as follows:
+- R1@D4/2 <--> R2@D4/1
+- R1@D4/3 <--> R3@D4/1
+- R2@D4/3 <--> R3@D4/2
+- R3@D4/100 <--> DCGW@D4/eth1
+
+## Inter-domain Connectivity
+
+The 4 domains are interconnected through the following inter-domain links:
+- R4@D1/10 <--> R1@D2/10
+- R5@D1/10 <--> R1@D3/10
+- R4@D2/10 <--> R2@D4/10
+- R5@D2/10 <--> R2@D3/10
+- R3@D3/10 <--> R1@D4/10
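+
+## Mapping to the Deployment (informative)
+
+As configured by the `deploy_specs_dom*.sh` files in this folder, each domain runs its own
+TeraFlowSDN instance in a dedicated Kubernetes namespace (`tfs-dom1` to `tfs-dom4`), while a
+shared `tfs-bchain` namespace hosts the Mock-Blockchain used by the DLT components.
+A quick, illustrative way to check that the namespaces exist after running `deploy_all.sh`:
+
+```bash
+kubectl get namespaces | grep -E 'tfs-dom[1-4]|tfs-bchain'
+kubectl --namespace tfs-dom1 get pods
+```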
diff --git a/src/tests/scenario2/deploy_all.sh b/src/tests/scenario2/deploy_all.sh
index 541612db431fd73e58fd7c1699df97342c11ea70..1eac2e4da701d2f2c60f8690c4784b59522a8b72 100755
--- a/src/tests/scenario2/deploy_all.sh
+++ b/src/tests/scenario2/deploy_all.sh
@@ -23,8 +23,8 @@ kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml
 kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml
 kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml
 
-# Delete MockBlockchain
-#kubectl delete namespace tfs-bchain
+# Delete MockBlockchain (comment out if using a real blockchain)
+kubectl delete namespace tfs-bchain
 
 # Create secondary ingress controllers
 kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml
@@ -32,8 +32,8 @@ kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml
 kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml
 kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml
 
-# Create MockBlockchain
-#./deploy_mock_blockchain.sh
+# Create MockBlockchain (comment out if using a real blockchain)
+./deploy/mock_blockchain.sh
 
 # Deploy TFS for Domain 1
 source nfvsdn22/deploy_specs_dom1.sh
diff --git a/src/tests/scenario2/deploy_specs_dom1.sh b/src/tests/scenario2/deploy_specs_dom1.sh
index cfe8a3bf63d875b4c579e36ff6a904e0f4b62e02..7dd777fbee12537729e8408fe671074a1e9b19f1 100755
--- a/src/tests/scenario2/deploy_specs_dom1.sh
+++ b/src/tests/scenario2/deploy_specs_dom1.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate InterDomain
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Uncomment to activate DLT
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom1"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom1.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
-export TFS_SKIP_BUILD="NO"
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom1"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom1"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4223"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8223"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom1"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8813"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9011"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9001"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/scenario2/deploy_specs_dom2.sh b/src/tests/scenario2/deploy_specs_dom2.sh
index 7034c22cdcdc93b6c6fc0e5227e0ef38bda95a55..bb6ce2f0c390d3253696e62bf23aa85b0d16782e 100755
--- a/src/tests/scenario2/deploy_specs_dom2.sh
+++ b/src/tests/scenario2/deploy_specs_dom2.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate InterDomain
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Uncomment to activate DLT
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom2"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom2.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+# Enable skip-build flag to reuse the Docker images built when deploying Domain 1.
 export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom2"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom2"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4224"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8224"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom2"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8814"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9012"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9002"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/scenario2/deploy_specs_dom3.sh b/src/tests/scenario2/deploy_specs_dom3.sh
index 044301418405ba20dfaf00cd58f9a1a15e7e62a7..797d55894a143308935664f2c879260dfd2760ec 100755
--- a/src/tests/scenario2/deploy_specs_dom3.sh
+++ b/src/tests/scenario2/deploy_specs_dom3.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate InterDomain
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Uncomment to activate DLT
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom3"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom3.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+# Enable skip-build flag to reuse the Docker images built when deploying Domain 1.
 export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom3"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom3"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4225"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8225"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom3"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8815"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9013"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9003"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/scenario2/deploy_specs_dom4.sh b/src/tests/scenario2/deploy_specs_dom4.sh
index 9e26ace470c81b0bdccfa83bf4eb7369970c981b..d2fe2abfa981d498558d263ca053093e017225d2 100755
--- a/src/tests/scenario2/deploy_specs_dom4.sh
+++ b/src/tests/scenario2/deploy_specs_dom4.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,24 +13,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate InterDomain
+export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain"
+
+# Uncomment to activate DLT
+export TFS_COMPONENTS="${TFS_COMPONENTS} dlt"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs-dom4"
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom4.yaml"
 
-# Set the neew Grafana admin password
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+# Enable skip-build flag to reuse the Docker images built when deploying Domain 1.
 export TFS_SKIP_BUILD="YES"
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs_dom4"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats-dom4"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4226"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8226"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb-dom4"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8816"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9014"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9004"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/tests/scenario2/dump_logs.sh b/src/tests/scenario2/dump_logs.sh
index 7b1dc9d17aabcf8866b76ed1acdb367eee0e3b51..c7acedbf613b66f3ec08bd1628e8e1ab76b9bc5e 100755
--- a/src/tests/scenario2/dump_logs.sh
+++ b/src/tests/scenario2/dump_logs.sh
@@ -13,64 +13,70 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-rm -rf tmp/exec
-
 echo "Collecting logs for MockBlockChain..."
-mkdir -p tmp/exec/mbc
-kubectl --namespace tfs-bchain logs deployments/mock-blockchain server > tmp/exec/mbc/mock-blockchain.log
+rm -rf tmp/tfs-bchain/exec
+mkdir -p tmp/tfs-bchain/exec
+kubectl --namespace tfs-bchain logs deployments/mock-blockchain server > tmp/tfs-bchain/exec/mock-blockchain.log
 printf "\n"
 
 echo "Collecting logs for Domain 1..."
-mkdir -p tmp/exec/dom1
-kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/exec/dom1/context.log
-kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/exec/dom1/device.log
-kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/dom1/service.log
-kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log
-kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log
-kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log
-kubectl --namespace tfs-dom1 logs deployments/interdomainservice server > tmp/exec/dom1/interdomain.log
-kubectl --namespace tfs-dom1 logs deployments/dltservice connector > tmp/exec/dom1/dlt-connector.log
-kubectl --namespace tfs-dom1 logs deployments/dltservice gateway > tmp/exec/dom1/dlt-gateway.log
+rm -rf tmp/tfs-dom1/exec
+mkdir -p tmp/tfs-dom1/exec
+kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/tfs-dom1/exec/context.log
+kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/tfs-dom1/exec/device.log
+kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/tfs-dom1/exec/service.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/tfs-dom1/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/tfs-dom1/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/tfs-dom1/exec/slice.log
+kubectl --namespace tfs-dom1 logs deployments/interdomainservice server > tmp/tfs-dom1/exec/interdomain.log
+kubectl --namespace tfs-dom1 logs deployments/dltservice connector > tmp/tfs-dom1/exec/dlt-connector.log
+kubectl --namespace tfs-dom1 logs deployments/dltservice gateway > tmp/tfs-dom1/exec/dlt-gateway.log
+kubectl --namespace tfs-dom1 logs deployments/webuiservice server > tmp/tfs-dom1/exec/webui.log
 printf "\n"
 
 echo "Collecting logs for Domain 2..."
-mkdir -p tmp/exec/dom2
-kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/exec/dom2/context.log
-kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/exec/dom2/device.log
-kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/dom2/service.log
-kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log
-kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log
-kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log
-kubectl --namespace tfs-dom2 logs deployments/interdomainservice server > tmp/exec/dom2/interdomain.log
-kubectl --namespace tfs-dom2 logs deployments/dltservice connector > tmp/exec/dom2/dlt-connector.log
-kubectl --namespace tfs-dom2 logs deployments/dltservice gateway > tmp/exec/dom2/dlt-gateway.log
+rm -rf tmp/tfs-dom2/exec
+mkdir -p tmp/tfs-dom2/exec
+kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/tfs-dom2/exec/context.log
+kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/tfs-dom2/exec/device.log
+kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/tfs-dom2/exec/service.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/tfs-dom2/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/tfs-dom2/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/tfs-dom2/exec/slice.log
+kubectl --namespace tfs-dom2 logs deployments/interdomainservice server > tmp/tfs-dom2/exec/interdomain.log
+kubectl --namespace tfs-dom2 logs deployments/dltservice connector > tmp/tfs-dom2/exec/dlt-connector.log
+kubectl --namespace tfs-dom2 logs deployments/dltservice gateway > tmp/tfs-dom2/exec/dlt-gateway.log
+kubectl --namespace tfs-dom2 logs deployments/webuiservice server > tmp/tfs-dom2/exec/webui.log
 printf "\n"
 
 echo "Collecting logs for Domain 3..."
-mkdir -p tmp/exec/dom3
-kubectl --namespace tfs-dom3 logs deployments/contextservice server > tmp/exec/dom3/context.log
-kubectl --namespace tfs-dom3 logs deployments/deviceservice server > tmp/exec/dom3/device.log
-kubectl --namespace tfs-dom3 logs deployments/serviceservice server > tmp/exec/dom3/service.log
-kubectl --namespace tfs-dom3 logs deployments/pathcompservice frontend > tmp/exec/dom3/pathcomp-frontend.log
-kubectl --namespace tfs-dom3 logs deployments/pathcompservice backend > tmp/exec/dom3/pathcomp-backend.log
-kubectl --namespace tfs-dom3 logs deployments/sliceservice server > tmp/exec/dom3/slice.log
-kubectl --namespace tfs-dom3 logs deployments/interdomainservice server > tmp/exec/dom3/interdomain.log
-kubectl --namespace tfs-dom3 logs deployments/dltservice connector > tmp/exec/dom3/dlt-connector.log
-kubectl --namespace tfs-dom3 logs deployments/dltservice gateway > tmp/exec/dom3/dlt-gateway.log
+rm -rf tmp/tfs-dom3/exec
+mkdir -p tmp/tfs-dom3/exec
+kubectl --namespace tfs-dom3 logs deployments/contextservice server > tmp/tfs-dom3/exec/context.log
+kubectl --namespace tfs-dom3 logs deployments/deviceservice server > tmp/tfs-dom3/exec/device.log
+kubectl --namespace tfs-dom3 logs deployments/serviceservice server > tmp/tfs-dom3/exec/service.log
+kubectl --namespace tfs-dom3 logs deployments/pathcompservice frontend > tmp/tfs-dom3/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom3 logs deployments/pathcompservice backend > tmp/tfs-dom3/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom3 logs deployments/sliceservice server > tmp/tfs-dom3/exec/slice.log
+kubectl --namespace tfs-dom3 logs deployments/interdomainservice server > tmp/tfs-dom3/exec/interdomain.log
+kubectl --namespace tfs-dom3 logs deployments/dltservice connector > tmp/tfs-dom3/exec/dlt-connector.log
+kubectl --namespace tfs-dom3 logs deployments/dltservice gateway > tmp/tfs-dom3/exec/dlt-gateway.log
+kubectl --namespace tfs-dom3 logs deployments/webuiservice server > tmp/tfs-dom3/exec/webui.log
 printf "\n"
 
 echo "Collecting logs for Domain 4..."
-mkdir -p tmp/exec/dom4
-kubectl --namespace tfs-dom4 logs deployments/contextservice server > tmp/exec/dom4/context.log
-kubectl --namespace tfs-dom4 logs deployments/deviceservice server > tmp/exec/dom4/device.log
-kubectl --namespace tfs-dom4 logs deployments/serviceservice server > tmp/exec/dom4/service.log
-kubectl --namespace tfs-dom4 logs deployments/pathcompservice frontend > tmp/exec/dom4/pathcomp-frontend.log
-kubectl --namespace tfs-dom4 logs deployments/pathcompservice backend > tmp/exec/dom4/pathcomp-backend.log
-kubectl --namespace tfs-dom4 logs deployments/sliceservice server > tmp/exec/dom4/slice.log
-kubectl --namespace tfs-dom4 logs deployments/interdomainservice server > tmp/exec/dom4/interdomain.log
-kubectl --namespace tfs-dom4 logs deployments/dltservice connector > tmp/exec/dom4/dlt-connector.log
-kubectl --namespace tfs-dom4 logs deployments/dltservice gateway > tmp/exec/dom4/dlt-gateway.log
+rm -rf tmp/tfs-dom4/exec
+mkdir -p tmp/tfs-dom4/exec
+kubectl --namespace tfs-dom4 logs deployments/contextservice server > tmp/tfs-dom4/exec/context.log
+kubectl --namespace tfs-dom4 logs deployments/deviceservice server > tmp/tfs-dom4/exec/device.log
+kubectl --namespace tfs-dom4 logs deployments/serviceservice server > tmp/tfs-dom4/exec/service.log
+kubectl --namespace tfs-dom4 logs deployments/pathcompservice frontend > tmp/tfs-dom4/exec/pathcomp-frontend.log
+kubectl --namespace tfs-dom4 logs deployments/pathcompservice backend > tmp/tfs-dom4/exec/pathcomp-backend.log
+kubectl --namespace tfs-dom4 logs deployments/sliceservice server > tmp/tfs-dom4/exec/slice.log
+kubectl --namespace tfs-dom4 logs deployments/interdomainservice server > tmp/tfs-dom4/exec/interdomain.log
+kubectl --namespace tfs-dom4 logs deployments/dltservice connector > tmp/tfs-dom4/exec/dlt-connector.log
+kubectl --namespace tfs-dom4 logs deployments/dltservice gateway > tmp/tfs-dom4/exec/dlt-gateway.log
+kubectl --namespace tfs-dom4 logs deployments/webuiservice server > tmp/tfs-dom4/exec/webui.log
 printf "\n"
 
 echo "Done!"
diff --git a/src/tests/scenario2/reset.sh b/src/tests/scenario2/reset.sh
index 5f4a3b8e5b8a58d8f2acf7c60cde1e77c2e1873a..f9e7ecab2d4693ef161ae9349e13a3ad0200c97c 100755
--- a/src/tests/scenario2/reset.sh
+++ b/src/tests/scenario2/reset.sh
@@ -14,10 +14,38 @@
 # limitations under the License.
 
 
+# Destroy all replicas of all microservices
+
 kubectl --namespace tfs-dom1 scale --replicas=0 \
     deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
     deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
 
+kubectl --namespace tfs-dom2 scale --replicas=0 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom3 scale --replicas=0 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom4 scale --replicas=0 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+# Create a single replica per microservice
+
 kubectl --namespace tfs-dom1 scale --replicas=1 \
     deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
     deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom2 scale --replicas=1 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom3 scale --replicas=1 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
+
+kubectl --namespace tfs-dom4 scale --replicas=1 \
+    deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \
+    deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice
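+
+# Optionally, watch the pods of a domain come back up (illustrative):
+#   kubectl --namespace tfs-dom1 get pods --watch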
diff --git a/src/tests/scenario2/show_deploy.sh b/src/tests/scenario2/show_deploy.sh
index 2aa8de873cf22e75be830c713fc379df9df154a4..20bbfaacef39b7b9b9c18facc3e20382e526377a 100755
--- a/src/tests/scenario2/show_deploy.sh
+++ b/src/tests/scenario2/show_deploy.sh
@@ -24,3 +24,27 @@ printf "\n"
 echo "Deployment Ingress:"
 kubectl --namespace tfs-dom1 get ingress
 printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom2 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom2 get ingress
+printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom3 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom3 get ingress
+printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace tfs-dom4 get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace tfs-dom4 get ingress
+printf "\n"