diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh
index 8afd683843d4882e75c3cbca8363aa3d63edda7f..ffd91da35186fe21f418950493ef797a9af1b522 100644
--- a/src/tests/ofc22/deploy_specs.sh
+++ b/src/tests/ofc22/deploy_specs.sh
@@ -2,6 +2,11 @@
 export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
+# Supported components are:
+#   context device automation policy service compute monitoring webui
+#   interdomain slice pathcomp dlt
+#   dbscanserving opticalattackmitigator opticalattackdetector
+#   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
 export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
 
 # Set the tag you want to use for your images.
@@ -13,5 +18,12 @@ export TFS_K8S_NAMESPACE="tfs"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
-# Set the neew Grafana admin password
+# Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
+
+# If not already set, disable the skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the container images are not rebuilt, retagged, and repushed; the existing ones are reused.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
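+
+# Example: to reuse the existing images and skip the build step, deploy with:
+#   TFS_SKIP_BUILD="YES" ./deploy.sh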
diff --git a/src/tests/ofc22/run_test_01_bootstrap.sh b/src/tests/ofc22/run_test_01_bootstrap.sh
index bb740707321b24fc960299f2eac91cc2d9775b64..4f0b6cd7d5c15572c4e6c9e2454e367c86f648ee 100755
--- a/src/tests/ofc22/run_test_01_bootstrap.sh
+++ b/src/tests/ofc22/run_test_01_bootstrap.sh
@@ -13,9 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# make sure to source the following scripts:
-# - my_deploy.sh
-# - tfs_runtime_env_vars.sh
-
 source tfs_runtime_env_vars.sh
 pytest --verbose src/tests/ofc22/tests/test_functional_bootstrap.py
diff --git a/src/tests/ofc22/run_tests_and_coverage.sh b/src/tests/ofc22/run_tests.sh
similarity index 77%
rename from src/tests/ofc22/run_tests_and_coverage.sh
rename to src/tests/ofc22/run_tests.sh
index ae956925a430e0eab167bf36a49be59014a2a97b..0ad4be313987b8b5069808873f94840521d4284e 100755
--- a/src/tests/ofc22/run_tests_and_coverage.sh
+++ b/src/tests/ofc22/run_tests.sh
@@ -16,7 +16,6 @@
 
 PROJECTDIR=`pwd`
 
-# cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 COVERAGEFILE=$PROJECTDIR/coverage/.coverage
 
@@ -31,15 +30,15 @@ source tfs_runtime_env_vars.sh
 # Force a flush of Context database
 kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
 
-# Run functional tests and analyze code coverage at the same time
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+# Run functional tests
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_bootstrap.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_create_service.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_delete_service.py
 
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+pytest --log-level=INFO --verbose \
     src/tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/setup_test_env.sh b/src/tests/ofc22/setup_test_env.sh
deleted file mode 100755
index 1f8b0a5a7a8dc986715c6f54a62151f6afa4ad80..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/setup_test_env.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get service/contextservice --namespace tfs  --template '{{.spec.clusterIP}}')
-export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service/contextservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}')
-export COMPUTESERVICE_SERVICE_HOST=$(kubectl get service/computeservice --namespace tfs  --template '{{.spec.clusterIP}}')
-export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service/computeservice --namespace tfs  -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
-echo "CONTEXTSERVICE_SERVICE_HOST=$CONTEXTSERVICE_SERVICE_HOST"
-echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=$CONTEXTSERVICE_SERVICE_PORT_GRPC"
-echo "COMPUTESERVICE_SERVICE_HOST=$COMPUTESERVICE_SERVICE_HOST"
-echo "COMPUTESERVICE_SERVICE_PORT_HTTP=$COMPUTESERVICE_SERVICE_PORT_HTTP"
diff --git a/tutorial/2-2-ofc22.md b/tutorial/2-2-ofc22.md
index 3b55a0961da78fdc78a8feb31499608589b9d0be..04d585d24cc046e6a1aadc1c93118a1b36855aca 100644
--- a/tutorial/2-2-ofc22.md
+++ b/tutorial/2-2-ofc22.md
@@ -37,9 +37,6 @@ environment and a TeraFlowSDN controller instance as described in the
 [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python
 environment as described in
 [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).
-Remember to source the scenario settings, e.g., `cd ~/tfs-ctrl && source ofc22/deploy_specs.sh` in each terminal you open.
-Then, re-build the protocol buffers code from the proto files:
-`./proto/generate_code_python.sh`
 
 
 ## 2.2.4. Access to the WebUI and Dashboard
@@ -55,25 +52,33 @@ Notes:
 
 ## 2.2.5. Test execution
 
-Before executing the tests, the environment variables need to be prepared. 
-First, make sure to load your deployment variables by:
+Before executing the tests, we need to prepare a few things.
+
+First, you need to make sure that you have all the gRPC-generated code in your folder.
+To do so, run:
 
 ```
-source my_deploy.sh
+proto/generate_code_python.sh
 ```
 
-Then, you also need to load the environment variables to support the execution of the 
-tests by:
+Then, it is time to deploy TeraFlowSDN with the correct specification for this scenario.
+Make sure to load the corresponding deployment variables by running:
 
 ```
-source tfs_runtime_env_vars.sh
+source ofc22/deploy_specs.sh
 ```
 
-You also need to make sure that you have all the gRPC-generate code in your folder.
-To do so, run:
+Then, you need to deploy the components by running:
 
 ```
-proto/generate_code_python.sh
+./deploy.sh
+```
+
+After the deployment is finished, you need to load the environment variables that support
+the execution of the tests by running:
+
+```
+source tfs_runtime_env_vars.sh
 ```
 
 To execute this functional test, four main steps need to be carried out:
@@ -90,8 +95,24 @@ See the troubleshooting section if needed.
 You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts
 after you execute each step.
 
+There are two ways to execute the functional tests: *running all the tests with a single script* or *running each test independently*.
+In the following, we start with the first option and then explain how to run each test independently.
+
+
+### 2.2.5.1. Running all tests with a single script
+
+We have a script that executes all the steps at once.
+It is meant to be used to check whether all the components involved in this scenario are working correctly.
+To run all the functional tests, you can run:
+
+```
+ofc22/run_tests.sh
+```
+
+The following sections explain each one of the steps.
 
-### 2.2.5.1. Device bootstrapping
+
+### 2.2.5.2. Device bootstrapping
 
 This step configures some basic entities (Context and Topology), the devices, and the 
 links in the topology.
@@ -103,7 +124,11 @@ The expected results are:
 
 To run this step, you can do it from the WebUI by uploading the file `./ofc22/tests/descriptors_emulated.json` that
 contains the descriptors of the contexts, topologies, devices, and links, or by 
-executing the `./ofc22/run_test_01_bootstrap.sh` script.
+executing the script:
+
+```
+./ofc22/run_test_01_bootstrap.sh
+```
 
 When the bootstrapping finishes, check in the Grafana L3-Monitoring Dashboard and you 
 should see the monitoring data being plotted and updated every 5 seconds (by default). 
@@ -117,12 +142,16 @@ Note here that the emulated devices produce synthetic randomly-generated monitor
 and do not represent any particular services configured.
 
 
-### 2.2.5.2. L3VPN Service creation
+### 2.2.5.3. L3VPN Service creation
 
 This step configures a new service emulating the request an OSM WIM would make by means 
 of a Mock OSM instance.
 
-To run this step, execute the `./ofc22/run_test_02_create_service.sh` script.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_02_create_service.sh
+```
 
 When the script finishes, check the WebUI *Services* tab. You should see that two 
 services have been created, one for the optical layer and another for the packet layer. 
@@ -133,13 +162,18 @@ the plots with the monitored data for the device.
 By default, device R1-EMU is selected.
 
 
-### 2.2.5.3. L3VPN Service removal
+### 2.2.5.4. L3VPN Service removal
 
 This step deconfigures the previously created services emulating the request an OSM WIM 
 would make by means of a Mock OSM instance.
 
-To run this step, execute the `./ofc22/run_test_03_delete_service.sh` script, or delete 
-the L3NM service from the WebUI.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_03_delete_service.sh
+```
+
+Alternatively, you can delete the L3NM service from the WebUI.
 
 When the script finishes, check the WebUI *Services* tab.
 You should see that the two services have been removed.
@@ -149,12 +183,16 @@ In the Grafana Dashboard, given that there is no service configured, you should
 0-valued flat plot again.
 
 
-### 2.2.5.4. Cleanup
+### 2.2.5.5. Cleanup
 
 This last step performs a cleanup of the scenario removing all the TeraFlowSDN entities 
 for completeness.
 
-To run this step, execute the `./ofc22/run_test_04_cleanup.sh` script.
+To run this step, execute the script:
+
+```
+./ofc22/run_test_04_cleanup.sh
+```
 
 When the script finishes, check the WebUI *Devices* tab, you should see that the devices 
 have been removed.