diff --git a/.gitignore b/.gitignore
index 8f01f60bf9c979a098c81d1609cc176cab630a5c..71b77da25c4d53db49b24642d88062906e7db219 100644
--- a/.gitignore
+++ b/.gitignore
@@ -155,3 +155,6 @@ cython_debug/
 
 # Sqlite
 *.db
+
+# TeraFlowSDN-generated files
+tfs_runtime_env_vars.sh
diff --git a/deploy.sh b/deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1eeebb805e426f0e6b719a4a46936a14da8bd72f
--- /dev/null
+++ b/deploy.sh
@@ -0,0 +1,236 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the URL of your local Docker registry where the images will be uploaded to.
+# Leave it blank if you do not want to use any Docker registry.
+export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""}
+#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/"
+
+# If not already set, set the list of components you want to build images for, and deploy.
+# By default, only basic components are deployed
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"}
+
+# If not already set, set the tag you want to use for your images.
+export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+# If not already set, set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
+
+# If not already set, set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+TMP_FOLDER="./tmp"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+mkdir -p $TMP_LOGS_FOLDER
+
+echo "Deleting and Creating a new namespace..."
+kubectl delete namespace $TFS_K8S_NAMESPACE
+kubectl create namespace $TFS_K8S_NAMESPACE
+printf "\n"
+
+if [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
+    echo "Creating secrets for InfluxDB..."
+    #TODO: make sure to change this when having a production deployment
+    kubectl create secret generic influxdb-secrets --namespace=$TFS_K8S_NAMESPACE \
+        --from-literal=INFLUXDB_DB="monitoring" --from-literal=INFLUXDB_ADMIN_USER="teraflow" \
+        --from-literal=INFLUXDB_ADMIN_PASSWORD="teraflow" --from-literal=INFLUXDB_HTTP_AUTH_ENABLED="True"
+    kubectl create secret generic monitoring-secrets --namespace=$TFS_K8S_NAMESPACE \
+        --from-literal=INFLUXDB_DATABASE="monitoring" --from-literal=INFLUXDB_USER="teraflow" \
+        --from-literal=INFLUXDB_PASSWORD="teraflow" --from-literal=INFLUXDB_HOSTNAME="localhost"
+    printf "\n"
+fi
+
+echo "Deploying components and collecting environment variables..."
+ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
+echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
+PYTHONPATH=$(pwd)/src
+echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Processing '$COMPONENT' component..."
+    IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG"
+    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+    echo "  Building Docker image..."
+    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+
+    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+    else 
+        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+    fi
+
+    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
+        echo "Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
+
+        TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+        docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
+
+        PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+        docker push "$IMAGE_URL" > "$PUSH_LOG"
+    fi
+
+    echo "  Adapting '$COMPONENT' manifest file..."
+    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
+    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+
+    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
+        # Registry is set
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+    else
+        # Registry is not set
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
+        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"        
+    fi
+
+    echo "  Deploying '$COMPONENT' component to Kubernetes..."
+    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
+    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+
+    echo "  Collecting env-vars for '$COMPONENT' component..."
+
+    SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json)
+    if [ -z "${SERVICE_DATA}" ]; then continue; fi
+
+    # Env vars for service's host address
+    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
+    if [ -z "${SERVICE_HOST}" ]; then continue; fi
+    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
+    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
+
+    # Env vars for service's 'grpc' port (if any)
+    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
+    if [ -n "${SERVICE_PORT_GRPC}" ]; then
+        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
+    fi
+
+    # Env vars for service's 'http' port (if any)
+    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
+    if [ -n "${SERVICE_PORT_HTTP}" ]; then
+        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
+    fi
+
+    printf "\n"
+done
+
+echo "Deploying extra manifests..."
+for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
+    echo "Processing manifest '$EXTRA_MANIFEST'..."
+    kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
+    printf "\n"
+done
+
+# For now, leave this control here. Some component dependencies are not well handled
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Waiting for '$COMPONENT' component..."
+    kubectl wait --namespace $TFS_K8S_NAMESPACE \
+        --for='condition=available' --timeout=300s deployment/${COMPONENT}service
+    printf "\n"
+done
+
+if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
+    echo "Configuring WebUI DataStores and Dashboards..."
+    sleep 3
+
+    INFLUXDB_HOST="monitoringservice"
+    INFLUXDB_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service/monitoringservice -o jsonpath='{.spec.ports[?(@.name=="influxdb")].port}')
+    INFLUXDB_URL="http://${INFLUXDB_HOST}:${INFLUXDB_PORT}"
+    INFLUXDB_USER=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_USER}' | base64 --decode)
+    INFLUXDB_PASSWORD=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_PASSWORD}' | base64 --decode)
+    INFLUXDB_DATABASE=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_DB}' | base64 --decode)
+
+    # Exposed through the ingress controller "tfs-ingress"
+    GRAFANA_HOSTNAME="127.0.0.1"
+    GRAFANA_PORT="80"
+    GRAFANA_BASEURL="/grafana"
+
+    # Default Grafana credentials
+    GRAFANA_USERNAME="admin"
+    GRAFANA_PASSWORD="admin"
+
+    # Default Grafana API URL
+    GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
+
+    # Updated Grafana API URL
+    GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
+
+    echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
+
+    # Configure Grafana Admin Password
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
+    curl -X PUT -H "Content-Type: application/json" -d '{
+        "oldPassword": "'${GRAFANA_PASSWORD}'",
+        "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
+        "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
+    }' ${GRAFANA_URL_DEFAULT}/api/user/password
+    echo
+
+    # Create InfluxDB DataSource
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
+    curl -X POST -H "Content-Type: application/json" -d '{
+        "type"     : "influxdb",
+        "name"     : "InfluxDB",
+        "url"      : "'"$INFLUXDB_URL"'",
+        "access"   : "proxy",
+        "basicAuth": false,
+        "user"     : "'"$INFLUXDB_USER"'",
+        "password" : "'"$INFLUXDB_PASSWORD"'",
+        "isDefault": true,
+        "database" : "'"$INFLUXDB_DATABASE"'"
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    echo
+
+    # Create Monitoring Dashboard
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
+    curl -X POST -H "Content-Type: application/json" \
+    -d '@src/webui/grafana_dashboard.json' \
+    ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tf-l3-monit"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+
+    printf "\n\n"
+fi
+
+./show_deploy.sh
+
+echo "Done!"
diff --git a/expose_ingress_grpc.sh b/expose_ingress_grpc.sh
new file mode 100755
index 0000000000000000000000000000000000000000..37d72aa8d66e1d2ff2e4677f245db8eaf2438ac4
--- /dev/null
+++ b/expose_ingress_grpc.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+# If not already set, set the list of components you want to build images for, and deploy.
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation policy service compute monitoring dbscanserving opticalattackmitigator opticalcentralizedattackdetector webui"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+echo "Exposing GRPC ports for components..."
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Processing '$COMPONENT' component..."
+
+    SERVICE_GRPC_PORT=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.name=="grpc")].port}')
+    if [ -z "${SERVICE_GRPC_PORT}" ]; then
+        printf "\n"
+        continue;
+    fi
+
+    PATCH='{"data": {"'${SERVICE_GRPC_PORT}'": "'$TFS_K8S_NAMESPACE'/'${COMPONENT}service':'${SERVICE_GRPC_PORT}'"}}'
+    #echo "PATCH: ${PATCH}"
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${SERVICE_GRPC_PORT}', "hostPort": '${SERVICE_GRPC_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    #echo "PATCH: ${PATCH}"
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+
+    printf "\n"
+done
+
+echo "Done!"
diff --git a/install_development_dependencies.sh b/install_development_dependencies.sh
deleted file mode 100755
index 55b52803bd10950e18695eb39fadfbe98295aee0..0000000000000000000000000000000000000000
--- a/install_development_dependencies.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# installing basic tools
-pip install --upgrade pip setuptools wheel pip-tools pylint pytest pytest-benchmark coverage grpcio-tools
-
-# creating an empty file
-echo "" > requirements.in
-
-#TODO: include here your component
-COMPONENTS="compute context device service monitoring opticalcentralizedattackdetector opticalattackmitigator dbscanserving webui"
-
-# compiling dependencies from all components
-for component in $COMPONENTS
-do
-    echo "computing requirements for component $component"
-    diff requirements.in src/$component/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
-done
-
-pip-compile --output-file=requirements.txt requirements.in
-python -m pip install -r requirements.txt
-
-# removing the temporary files
-rm requirements.in
-rm requirements.txt
diff --git a/install_requirements.sh b/install_requirements.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ea9385729a6199be29926e4c13b6a05152446155
--- /dev/null
+++ b/install_requirements.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If not already set, set the list of components you want to install dependencies for.
+# By default, dependencies for all components are installed.
+# Components still not supported by this script:
+#   automation & policy : implemented in Java
+#   dlt                 : under design
+#   pathcomp            : under design
+ALL_COMPONENTS="context device service compute monitoring webui interdomain slice"
+ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalcentralizedattackdetector"
+ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
+TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
+
+echo "Updating PIP, SetupTools and Wheel..."
+pip install --upgrade pip               # ensure next packages get the latest versions
+pip install --upgrade setuptools wheel  # bring basic tooling for other requirements
+pip install --upgrade pip-tools pylint  # bring tooling for package compilation and code linting
+printf "\n"
+
+echo "Creating integrated requirements file..."
+touch requirements.in
+diff requirements.in common_requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
+printf "\n"
+
+echo "Collecting requirements from components..."
+for COMPONENT in $TFS_COMPONENTS
+do
+    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then continue; fi
+    diff requirements.in src/$COMPONENT/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
+done
+printf "\n"
+
+echo "Compiling requirements..."
+# Done in a single step to prevent breaking dependencies between components
+pip-compile --quiet --output-file=requirements.txt requirements.in
+printf "\n"
+
+echo "Installing requirements..."
+python -m pip install -r requirements.txt
+printf "\n"
+
+#echo "Removing the temporary files..."
+rm requirements.in
+rm requirements.txt
+printf "\n"
diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..50ff81c79eaa02647562456809226d1aed847204
--- /dev/null
+++ b/manifests/nginx_ingress_http.yaml
@@ -0,0 +1,38 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  rules:
+  - http:
+      paths:
+        - path: /webui(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 8004
+        - path: /grafana(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: webuiservice
+              port:
+                number: 3000
+        - path: /context(/|$)(.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: contextservice
+              port:
+                number: 8080
+        - path: /()(restconf/.*)
+          pathType: Prefix
+          backend:
+            service:
+              name: computeservice
+              port:
+                number: 8080
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index 8bbf024dc4806ebcd6b6d2d2db80e4adc841ddfc..52fc75a9868001d50f7380cfe238fa344de27f6e 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -67,6 +67,11 @@ spec:
           - containerPort: 3000
             name: http-grafana
             protocol: TCP
+        env:
+        - name: GF_SERVER_ROOT_URL
+          value: "http://0.0.0.0:3000/grafana/"
+        - name: GF_SERVER_SERVE_FROM_SUB_PATH
+          value: "true"
         readinessProbe:
           failureThreshold: 3
           httpGet:
@@ -102,6 +107,9 @@ spec:
   selector:
     app: webuiservice
   ports:
-  - name: http
+  - name: webui
     port: 8004
     targetPort: 8004
+  - name: grafana
+    port: 3000
+    targetPort: 3000
diff --git a/my_deploy.sh b/my_deploy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..67a2e0558c25d767e14b635e6dd9174433827156
--- /dev/null
+++ b/my_deploy.sh
@@ -0,0 +1,22 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by commas, you want to build images for, and deploy.
+# Supported components are:
+#   context device automation policy service compute monitoring webui
+#   interdomain slice pathcomp dlt
+#   dbscanserving opticalattackmitigator opticalcentralizedattackdetector
+#   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
+export TFS_COMPONENTS="context device automation service compute monitoring webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
diff --git a/configure_dashboards.sh b/scripts/old/configure_dashboards_in_kubernetes.sh
similarity index 100%
rename from configure_dashboards.sh
rename to scripts/old/configure_dashboards_in_kubernetes.sh
diff --git a/deploy_in_kubernetes.sh b/scripts/old/deploy_in_kubernetes.sh
similarity index 99%
rename from deploy_in_kubernetes.sh
rename to scripts/old/deploy_in_kubernetes.sh
index a5e227cd4a9cd73468a17e273fb6e3efff97e054..89f45a5484f95f065f6656249f3fb04bf507a782 100755
--- a/deploy_in_kubernetes.sh
+++ b/scripts/old/deploy_in_kubernetes.sh
@@ -136,7 +136,7 @@ done
 
 if [[ "$COMPONENTS" == *"webui"* ]]; then
     echo "Configuring WebUI DataStores and Dashboards..."
-    ./configure_dashboards.sh
+    ./configure_dashboards_in_kubernetes.sh
     printf "\n\n"
 fi
 
diff --git a/scripts/report_coverage_all.sh b/scripts/report_coverage_all.sh
index a7e4797f3118a03c5f4db7eb384a67bdea4d795a..3b7df170c880ede72dea356752c5120e59dd9d71 100755
--- a/scripts/report_coverage_all.sh
+++ b/scripts/report_coverage_all.sh
@@ -16,7 +16,7 @@
 
 PROJECTDIR=`pwd`
 
-cd $(dirname $0)/src
+cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 echo
diff --git a/scripts/run_tests_locally-compute.sh b/scripts/run_tests_locally-compute.sh
index 48ce6e232a8005ee37fce8a0dbd9f7aed4cf83dc..d48fe417134d2f8c3078d549b3bb84e2cc745da6 100755
--- a/scripts/run_tests_locally-compute.sh
+++ b/scripts/run_tests_locally-compute.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=service.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     compute/tests/test_unitary.py
diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh
index 1cbe95453b30b241995a38d4bf0d034868fdee51..9e5ac4b92b5d55509173b23f0896cb108bdd3a1e 100755
--- a/scripts/run_tests_locally-context.sh
+++ b/scripts/run_tests_locally-context.sh
@@ -26,9 +26,5 @@ export REDIS_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status
 export REDIS_SERVICE_PORT=$(kubectl get service redis-tests --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}')
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     context/tests/test_unitary.py
diff --git a/scripts/run_tests_locally-device-all.sh b/scripts/run_tests_locally-device-all.sh
index 2cf8faaf50355a3cc5f3a0206498ed4dacb48523..a60eab0be932862cf1adc3a81678239de566bd37 100755
--- a/scripts/run_tests_locally-device-all.sh
+++ b/scripts/run_tests_locally-device-all.sh
@@ -21,9 +21,6 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
 
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     device/tests/test_unitary_emulated.py
 
diff --git a/scripts/run_tests_locally-device-emulated.sh b/scripts/run_tests_locally-device-emulated.sh
index ab4f77adaf9c0549551c91d944c1c6db77a8b9cb..541017f7a6b9f3d1289162ad69b27f572aa046cb 100755
--- a/scripts/run_tests_locally-device-emulated.sh
+++ b/scripts/run_tests_locally-device-emulated.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     device/tests/test_unitary_emulated.py
diff --git a/scripts/run_tests_locally-device-microwave.sh b/scripts/run_tests_locally-device-microwave.sh
index e03630c9f63c65cae91464b76cc3ddc447835f42..21f3e5ab67c882ab51f7c8c14a95ed6df26418de 100755
--- a/scripts/run_tests_locally-device-microwave.sh
+++ b/scripts/run_tests_locally-device-microwave.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     device/tests/test_unitary_microwave.py
diff --git a/scripts/run_tests_locally-device-openconfig.sh b/scripts/run_tests_locally-device-openconfig.sh
index 83d4a0545a3386395ead97f40d45c034350c73b9..f87346fed8ebe9b27c806759fafc851a15afd068 100755
--- a/scripts/run_tests_locally-device-openconfig.sh
+++ b/scripts/run_tests_locally-device-openconfig.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     device/tests/test_unitary_openconfig.py
diff --git a/scripts/run_tests_locally-device-p4.sh b/scripts/run_tests_locally-device-p4.sh
index 36b381a3cd9214603456828b41e6d70b8c6c908d..4e6754e4d56741f960e1e5562abb0c10abc0ccb4 100755
--- a/scripts/run_tests_locally-device-p4.sh
+++ b/scripts/run_tests_locally-device-p4.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     device/tests/test_unitary_p4.py
diff --git a/scripts/run_tests_locally-device-tapi.sh b/scripts/run_tests_locally-device-tapi.sh
index a281466b677f256b2ce9fe7770bf2b052ef59126..d37e4e2b7f8545c2033b0049722cdbb8c589b55b 100755
--- a/scripts/run_tests_locally-device-tapi.sh
+++ b/scripts/run_tests_locally-device-tapi.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     device/tests/test_unitary_tapi.py
diff --git a/scripts/run_tests_locally-service.sh b/scripts/run_tests_locally-service.sh
index 853eb97673e9e2a3a3fa28d025bd8af9ef4ea6cf..8a2a8d0be1d1960c6197a67e471ae29abba501a7 100755
--- a/scripts/run_tests_locally-service.sh
+++ b/scripts/run_tests_locally-service.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=service.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     service/tests/test_unitary.py
diff --git a/scripts/run_tests_locally-slice.sh b/scripts/run_tests_locally-slice.sh
index adad39b5b2de2f4de0f2451e89a20732d1ecda2c..fa3af4eba1f9d42a1f9d283964a536a00f9547ae 100755
--- a/scripts/run_tests_locally-slice.sh
+++ b/scripts/run_tests_locally-slice.sh
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 
 # Run unitary tests and analyze coverage of code at same time
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=service.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     slice/tests/test_unitary.py
diff --git a/scripts/run_tests_locally.sh b/scripts/run_tests_locally.sh
index 69463ea79bf717565385a44f168e84780902fce8..1d48cc1af18629874b0275b1fa92bf31961741c3 100755
--- a/scripts/run_tests_locally.sh
+++ b/scripts/run_tests_locally.sh
@@ -51,9 +51,6 @@ export INFLUXDB_DATABASE=$(kubectl --namespace $K8S_NAMESPACE get secrets influx
 # First destroy old coverage file
 rm -f $COVERAGEFILE
 
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     common/orm/tests/test_unitary.py \
     common/message_broker/tests/test_unitary.py \
diff --git a/scripts/show_logs_automation.sh b/scripts/show_logs_automation.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8823f29c09960ce980f48d76463682d34e2ea09f
--- /dev/null
+++ b/scripts/show_logs_automation.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/automationservice
diff --git a/scripts/show_logs_compute.sh b/scripts/show_logs_compute.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5e061bb9eb49047b96027a39d3bc846a3e502b5c
--- /dev/null
+++ b/scripts/show_logs_compute.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace "$TFS_K8S_NAMESPACE" logs deployment/computeservice
diff --git a/scripts/show_logs_context.sh b/scripts/show_logs_context.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ece545a7e32131880079c2ce65a950c64a16273e
--- /dev/null
+++ b/scripts/show_logs_context.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace "$TFS_K8S_NAMESPACE" logs deployment/contextservice -c server
diff --git a/scripts/show_logs_device.sh b/scripts/show_logs_device.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e1c2e4aa8a5fd39e525fcf61ffcf5572e3e6c8d0
--- /dev/null
+++ b/scripts/show_logs_device.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace "$TFS_K8S_NAMESPACE" logs deployment/deviceservice
diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5978035127735c20ddc6387666a5434cbac61ff8
--- /dev/null
+++ b/scripts/show_logs_monitoring.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace "$TFS_K8S_NAMESPACE" logs deployment/monitoringservice -c server
diff --git a/scripts/show_logs_service.sh b/scripts/show_logs_service.sh
new file mode 100755
index 0000000000000000000000000000000000000000..251add7e1641862f3c95dbf038920bc86b3c89ff
--- /dev/null
+++ b/scripts/show_logs_service.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace "$TFS_K8S_NAMESPACE" logs deployment/serviceservice
diff --git a/scripts/show_logs_webui.sh b/scripts/show_logs_webui.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c73f5f51a6aefe0caee2620cccca272f1abb8622
--- /dev/null
+++ b/scripts/show_logs_webui.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace "$TFS_K8S_NAMESPACE" logs deployment/webuiservice -c server
diff --git a/show_deploy.sh b/show_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e894d44f3d7f79ac18ce4f3d5b2708a6402764e6
--- /dev/null
+++ b/show_deploy.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+echo "Deployment Resources:"
+kubectl --namespace "$TFS_K8S_NAMESPACE" get all
+printf "\n"
+
+echo "Deployment Ingress:"
+kubectl --namespace "$TFS_K8S_NAMESPACE" get ingress
+printf "\n"
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
index 7c0ef0183e93c1874fa01f89a90fdb8c66178fd6..dae9a7041704573b642e565c9d6ffad7ba107041 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py
@@ -22,9 +22,9 @@ DEFAULT_BGP_ROUTE_TARGET = '{:d}:{:d}'.format(DEFAULT_BGP_AS, 333)
 #       device_uuid, endpoint_uuid, router_id, route_distinguisher, sub_if_index, address_ip, address_prefix)
 BEARER_MAPPINGS = {
     # OFC'22
-    #'R1-INF:13/2/1': ('R1-INF', '13/2/1', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24),
+    #'R1-EMU:13/2/1': ('R1-EMU', '13/2/1', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24),
     #'R2-EMU:13/2/1': ('R2-EMU', '13/2/1', '12.12.12.1', '65000:120', 450, '3.4.2.1', 24),
-    #'R3-INF:13/2/1': ('R3-INF', '13/2/1', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24),
+    #'R3-EMU:13/2/1': ('R3-EMU', '13/2/1', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24),
     #'R4-EMU:13/2/1': ('R4-EMU', '13/2/1', '22.22.22.1', '65000:220', 550, '3.4.1.1', 24),
 
     # OECC/PSC'22 - domain 1
diff --git a/src/compute/tests/Constants.py b/src/compute/tests/Constants.py
index 8d4e2ba8fe6144e6fc11c61bbe2c8296d74fc910..640124b07fd8e5dc0dff0635175b1499544f1b2d 100644
--- a/src/compute/tests/Constants.py
+++ b/src/compute/tests/Constants.py
@@ -22,7 +22,7 @@ WIM_MAPPING  = [
         #'device_interface_id' : ??,                # pop_switch_port
         'service_endpoint_id' : 'ep-1',             # wan_service_endpoint_id
         'service_mapping_info': {                   # wan_service_mapping_info, other extra info
-            'bearer': {'bearer-reference': 'R1-INF:13/2/1'},
+            'bearer': {'bearer-reference': 'R1-EMU:13/1/2'},
             'site-id': '1',
         },
         #'switch_dpid'         : ??,                # wan_switch_dpid
@@ -34,7 +34,7 @@ WIM_MAPPING  = [
         #'device_interface_id' : ??,                # pop_switch_port
         'service_endpoint_id' : 'ep-2',             # wan_service_endpoint_id
         'service_mapping_info': {                   # wan_service_mapping_info, other extra info
-            'bearer': {'bearer-reference': 'R2-EMU:13/2/1'},
+            'bearer': {'bearer-reference': 'R2-EMU:13/1/2'},
             'site-id': '2',
         },
         #'switch_dpid'         : ??,                # wan_switch_dpid
@@ -46,7 +46,7 @@ WIM_MAPPING  = [
         #'device_interface_id' : ??,                # pop_switch_port
         'service_endpoint_id' : 'ep-3',             # wan_service_endpoint_id
         'service_mapping_info': {                   # wan_service_mapping_info, other extra info
-            'bearer': {'bearer-reference': 'R3-INF:13/2/1'},
+            'bearer': {'bearer-reference': 'R3-EMU:13/1/2'},
             'site-id': '3',
         },
         #'switch_dpid'         : ??,                # wan_switch_dpid
@@ -58,7 +58,7 @@ WIM_MAPPING  = [
         #'device_interface_id' : ??,                # pop_switch_port
         'service_endpoint_id' : 'ep-4',             # wan_service_endpoint_id
         'service_mapping_info': {                   # wan_service_mapping_info, other extra info
-            'bearer': {'bearer-reference': 'R4-EMU:13/2/1'},
+            'bearer': {'bearer-reference': 'R4-EMU:13/1/2'},
             'site-id': '4',
         },
         #'switch_dpid'         : ??,                # wan_switch_dpid
diff --git a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
index 9970852124fb0a5334087ef703a6b0fb5b3ef9a8..9e536450873fe9d6e31f66dd71088ae401fce64e 100644
--- a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
+++ b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java
@@ -56,6 +56,7 @@ import eu.teraflow.policy.context.model.Device;
 import eu.teraflow.policy.context.model.DeviceConfig;
 import eu.teraflow.policy.context.model.DeviceDriverEnum;
 import eu.teraflow.policy.context.model.DeviceOperationalStatus;
+import eu.teraflow.policy.context.model.Empty;
 import eu.teraflow.policy.context.model.EndPoint;
 import eu.teraflow.policy.context.model.EndPointId;
 import eu.teraflow.policy.context.model.Event;
@@ -95,11 +96,15 @@ import eu.teraflow.policy.monitoring.model.KpiValue;
 import eu.teraflow.policy.monitoring.model.KpiValueRange;
 import eu.teraflow.policy.monitoring.model.StringKpiValue;
 import eu.teraflow.policy.monitoring.model.SubsDescriptor;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.stream.Collectors;
 import javax.inject.Singleton;
 import kpi_sample_types.KpiSampleTypes;
 import monitoring.Monitoring;
+import monitoring.Monitoring.AlarmID;
 import monitoring.Monitoring.KpiId;
+import monitoring.Monitoring.SubscriptionID;
 import policy.Policy;
 import policy.Policy.PolicyRuleId;
 import policy.PolicyAction;
@@ -1538,6 +1543,21 @@ public class Serializer {
         return new KpiValueRange(minKpiValue, maxKpiValue);
     }
 
+    public AlarmID serializeAlarmId(String alarmId) {
+        final var builder = Monitoring.AlarmID.newBuilder();
+
+        final var serializedAlarmIdUuid = serializeUuid(alarmId);
+        builder.setAlarmId(serializedAlarmIdUuid);
+
+        return builder.build();
+    }
+
+    public String deserialize(AlarmID serializedAlarmId) {
+        final var serializedAlarmIdUuid = serializedAlarmId.getAlarmId();
+
+        return deserialize(serializedAlarmIdUuid);
+    }
+
     public Monitoring.AlarmDescriptor serialize(AlarmDescriptor alarmDescriptor) {
         final var builder = Monitoring.AlarmDescriptor.newBuilder();
 
@@ -1637,6 +1657,21 @@ public class Serializer {
         return new SubsDescriptor(kpiId, samplingDurationS, samplingIntervalS, startDate, endDate);
     }
 
+    public SubscriptionID serializeSubscriptionIdId(String subscriptionId) {
+        final var builder = Monitoring.SubscriptionID.newBuilder();
+
+        final var serializedSubscriptionIdUuid = serializeUuid(subscriptionId);
+        builder.setSubsId(serializedSubscriptionIdUuid);
+
+        return builder.build();
+    }
+
+    public String deserialize(SubscriptionID serializedSubscriptionId) {
+        final var serializedSubscriptionIdUuid = serializedSubscriptionId.getSubsId();
+
+        return deserialize(serializedSubscriptionIdUuid);
+    }
+
     public PolicyCondition.PolicyRuleCondition serialize(PolicyRuleCondition policyRuleCondition) {
         final var builder = PolicyCondition.PolicyRuleCondition.newBuilder();
 
@@ -1908,6 +1943,28 @@ public class Serializer {
         return new Kpi(kpiId, timestamp, kpiValue);
     }
 
+    public List<Monitoring.Kpi> serialize(List<Kpi> kpis) {
+        List<Monitoring.Kpi> serializedKpis = new ArrayList<>();
+
+        for (Kpi kpi : kpis) {
+            final var serializedKpi = serialize(kpi);
+
+            serializedKpis.add(serializedKpi);
+        }
+        return serializedKpis;
+    }
+
+    public List<Kpi> deserialize(List<Monitoring.Kpi> serializedKpis) {
+        List<Kpi> kpis = new ArrayList<>();
+
+        for (Monitoring.Kpi serializedKpi : serializedKpis) {
+            final var kpi = deserialize(serializedKpi);
+
+            kpis.add(kpi);
+        }
+        return kpis;
+    }
+
     public Monitoring.KpiDescriptor serialize(KpiDescriptor kpiDescriptor) {
         final var builder = Monitoring.KpiDescriptor.newBuilder();
 
@@ -2130,6 +2187,17 @@ public class Serializer {
                 deviceEndPoints);
     }
 
+    public ContextOuterClass.Empty serializeEmpty(Empty empty) {
+
+        final var builder = ContextOuterClass.Empty.newBuilder();
+
+        return builder.build();
+    }
+
+    public Empty deserializeEmpty(ContextOuterClass.Empty serializedEmpty) {
+        return new Empty();
+    }
+
     public Uuid serializeUuid(String uuid) {
         return Uuid.newBuilder().setUuid(uuid).build();
     }
diff --git a/src/policy/src/main/java/eu/teraflow/policy/context/model/Empty.java b/src/policy/src/main/java/eu/teraflow/policy/context/model/Empty.java
new file mode 100644
index 0000000000000000000000000000000000000000..d19dd9776e9688999007761e969e5c1b53d37176
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/context/model/Empty.java
@@ -0,0 +1,24 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.context.model;
+
+public class Empty {
+
+    public Empty() {
+        // Empty constructor to represent the Empty rpc message of context service
+    }
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringGateway.java b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringGateway.java
new file mode 100644
index 0000000000000000000000000000000000000000..4b9849a7649894cb4109fb458dac611e834bd916
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringGateway.java
@@ -0,0 +1,48 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.monitoring;
+
+import eu.teraflow.policy.context.model.Empty;
+import eu.teraflow.policy.monitoring.model.AlarmDescriptor;
+import eu.teraflow.policy.monitoring.model.AlarmResponse;
+import eu.teraflow.policy.monitoring.model.Kpi;
+import eu.teraflow.policy.monitoring.model.KpiDescriptor;
+import eu.teraflow.policy.monitoring.model.SubsDescriptor;
+import io.smallrye.mutiny.Multi;
+import io.smallrye.mutiny.Uni;
+import java.util.List;
+
+public interface MonitoringGateway {
+
+    Uni<String> createKpi(KpiDescriptor kpiDescriptor);
+
+    Uni<KpiDescriptor> getKpiDescriptor(String kpiId);
+
+    Multi<List<Kpi>> subscribeKpi(SubsDescriptor subsDescriptor);
+
+    Uni<SubsDescriptor> getSubsDescriptor(String subscriptionId);
+
+    Uni<Empty> editKpiSubscription(SubsDescriptor subsDescriptor);
+
+    Uni<String> createKpiAlarm(AlarmDescriptor alarmDescriptor);
+
+    Uni<Empty> editKpiAlarm(AlarmDescriptor alarmDescriptor);
+
+    Uni<AlarmDescriptor> getAlarmDescriptor(String alarmId);
+
+    Multi<AlarmResponse> getAlarmResponseStream(String alarmId);
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringGatewayImpl.java b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringGatewayImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..e0b4e088a9e23387f56d956bed5f6e104a68ea56
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringGatewayImpl.java
@@ -0,0 +1,136 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.monitoring;
+
+import eu.teraflow.policy.Serializer;
+import eu.teraflow.policy.context.model.Empty;
+import eu.teraflow.policy.monitoring.model.AlarmDescriptor;
+import eu.teraflow.policy.monitoring.model.AlarmResponse;
+import eu.teraflow.policy.monitoring.model.Kpi;
+import eu.teraflow.policy.monitoring.model.KpiDescriptor;
+import eu.teraflow.policy.monitoring.model.SubsDescriptor;
+import io.quarkus.grpc.GrpcClient;
+import io.smallrye.mutiny.Multi;
+import io.smallrye.mutiny.Uni;
+import java.util.List;
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+import monitoring.MutinyMonitoringServiceGrpc.MutinyMonitoringServiceStub;
+
+@ApplicationScoped
+public class MonitoringGatewayImpl implements MonitoringGateway {
+
+    @GrpcClient("monitoring")
+    MutinyMonitoringServiceStub streamingDelegateMonitoring;
+
+    private final Serializer serializer;
+
+    @Inject
+    public MonitoringGatewayImpl(Serializer serializer) {
+        this.serializer = serializer;
+    }
+
+    @Override
+    public Uni<String> createKpi(KpiDescriptor kpiDescriptor) {
+        final var serializedKpiDescriptor = serializer.serialize(kpiDescriptor);
+
+        return streamingDelegateMonitoring
+                .createKpi(serializedKpiDescriptor)
+                .onItem()
+                .transform(serializer::deserialize);
+    }
+
+    @Override
+    public Uni<KpiDescriptor> getKpiDescriptor(String kpiId) {
+        final var serializedKpiId = serializer.serializeKpiId(kpiId);
+
+        return streamingDelegateMonitoring
+                .getKpiDescriptor(serializedKpiId)
+                .onItem()
+                .transform(serializer::deserialize);
+    }
+
+    @Override
+    public Multi<List<Kpi>> subscribeKpi(SubsDescriptor subsDescriptor) {
+        final var serializedSubsDescriptor = serializer.serialize(subsDescriptor);
+
+        return streamingDelegateMonitoring
+                .subscribeKpi(serializedSubsDescriptor)
+                .onItem()
+                .transform(kpiList -> serializer.deserialize(kpiList.getKpiListList()));
+    }
+
+    @Override
+    public Uni<SubsDescriptor> getSubsDescriptor(String subscriptionId) {
+        final var serializedSubscriptionId = serializer.serializeSubscriptionIdId(subscriptionId);
+
+        return streamingDelegateMonitoring
+                .getSubsDescriptor(serializedSubscriptionId)
+                .onItem()
+                .transform(serializer::deserialize);
+    }
+
+    @Override
+    public Uni<Empty> editKpiSubscription(SubsDescriptor subsDescriptor) {
+        final var serializedSubsDescriptor = serializer.serialize(subsDescriptor);
+
+        return streamingDelegateMonitoring
+                .editKpiSubscription(serializedSubsDescriptor)
+                .onItem()
+                .transform(serializer::deserializeEmpty);
+    }
+
+    @Override
+    public Uni<String> createKpiAlarm(AlarmDescriptor alarmDescriptor) {
+        final var serializedAlarmDescriptor = serializer.serialize(alarmDescriptor);
+
+        return streamingDelegateMonitoring
+                .createKpiAlarm(serializedAlarmDescriptor)
+                .onItem()
+                .transform(serializer::deserialize);
+    }
+
+    @Override
+    public Uni<Empty> editKpiAlarm(AlarmDescriptor alarmDescriptor) {
+        final var serializedAlarmDescriptor = serializer.serialize(alarmDescriptor);
+
+        return streamingDelegateMonitoring
+                .editKpiAlarm(serializedAlarmDescriptor)
+                .onItem()
+                .transform(serializer::deserializeEmpty);
+    }
+
+    @Override
+    public Uni<AlarmDescriptor> getAlarmDescriptor(String alarmId) {
+        final var serializedAlarmId = serializer.serializeAlarmId(alarmId);
+
+        return streamingDelegateMonitoring
+                .getAlarmDescriptor(serializedAlarmId)
+                .onItem()
+                .transform(serializer::deserialize);
+    }
+
+    @Override
+    public Multi<AlarmResponse> getAlarmResponseStream(String alarmId) {
+        final var serializedAlarmId = serializer.serializeAlarmId(alarmId);
+
+        return streamingDelegateMonitoring
+                .getAlarmResponseStream(serializedAlarmId)
+                .onItem()
+                .transform(serializer::deserialize);
+    }
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringService.java b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringService.java
new file mode 100644
index 0000000000000000000000000000000000000000..276a9d3632655cf684ae4dff0469d477ff15a88e
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringService.java
@@ -0,0 +1,48 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.monitoring;
+
+import eu.teraflow.policy.context.model.Empty;
+import eu.teraflow.policy.monitoring.model.AlarmDescriptor;
+import eu.teraflow.policy.monitoring.model.AlarmResponse;
+import eu.teraflow.policy.monitoring.model.Kpi;
+import eu.teraflow.policy.monitoring.model.KpiDescriptor;
+import eu.teraflow.policy.monitoring.model.SubsDescriptor;
+import io.smallrye.mutiny.Multi;
+import io.smallrye.mutiny.Uni;
+import java.util.List;
+
+public interface MonitoringService {
+
+    Uni<String> createKpi(KpiDescriptor kpiDescriptor);
+
+    Uni<KpiDescriptor> getKpiDescriptor(String kpiId);
+
+    Multi<List<Kpi>> subscribeKpi(SubsDescriptor subsDescriptor);
+
+    Uni<SubsDescriptor> getSubsDescriptor(String subscriptionId);
+
+    Uni<Empty> editKpiSubscription(SubsDescriptor subsDescriptor);
+
+    Uni<String> createKpiAlarm(AlarmDescriptor alarmDescriptor);
+
+    Uni<Empty> editKpiAlarm(AlarmDescriptor alarmDescriptor);
+
+    Uni<AlarmDescriptor> getAlarmDescriptor(String alarmId);
+
+    Multi<AlarmResponse> getAlarmResponseStream(String alarmId);
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringServiceImpl.java b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringServiceImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..e1e79af757b9866360040b785fd41dd1f0c70cd4
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/monitoring/MonitoringServiceImpl.java
@@ -0,0 +1,85 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.monitoring;
+
+import eu.teraflow.policy.context.model.Empty;
+import eu.teraflow.policy.monitoring.model.AlarmDescriptor;
+import eu.teraflow.policy.monitoring.model.AlarmResponse;
+import eu.teraflow.policy.monitoring.model.Kpi;
+import eu.teraflow.policy.monitoring.model.KpiDescriptor;
+import eu.teraflow.policy.monitoring.model.SubsDescriptor;
+import io.smallrye.mutiny.Multi;
+import io.smallrye.mutiny.Uni;
+import java.util.List;
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+
+@ApplicationScoped
+public class MonitoringServiceImpl implements MonitoringService {
+
+    private final MonitoringGateway monitoringGateway;
+
+    @Inject
+    public MonitoringServiceImpl(MonitoringGateway monitoringGateway) {
+        this.monitoringGateway = monitoringGateway;
+    }
+
+    @Override
+    public Uni<String> createKpi(KpiDescriptor kpiDescriptor) {
+        return monitoringGateway.createKpi(kpiDescriptor);
+    }
+
+    @Override
+    public Uni<KpiDescriptor> getKpiDescriptor(String kpiId) {
+        return monitoringGateway.getKpiDescriptor(kpiId);
+    }
+
+    @Override
+    public Multi<List<Kpi>> subscribeKpi(SubsDescriptor subsDescriptor) {
+        return monitoringGateway.subscribeKpi(subsDescriptor);
+    }
+
+    @Override
+    public Uni<SubsDescriptor> getSubsDescriptor(String subscriptionId) {
+        return monitoringGateway.getSubsDescriptor(subscriptionId);
+    }
+
+    @Override
+    public Uni<Empty> editKpiSubscription(SubsDescriptor subsDescriptor) {
+        return monitoringGateway.editKpiSubscription(subsDescriptor);
+    }
+
+    @Override
+    public Uni<String> createKpiAlarm(AlarmDescriptor alarmDescriptor) {
+        return monitoringGateway.createKpiAlarm(alarmDescriptor);
+    }
+
+    @Override
+    public Uni<Empty> editKpiAlarm(AlarmDescriptor alarmDescriptor) {
+        return monitoringGateway.editKpiAlarm(alarmDescriptor);
+    }
+
+    @Override
+    public Uni<AlarmDescriptor> getAlarmDescriptor(String alarmId) {
+        return monitoringGateway.getAlarmDescriptor(alarmId);
+    }
+
+    @Override
+    public Multi<AlarmResponse> getAlarmResponseStream(String alarmId) {
+        return monitoringGateway.getAlarmResponseStream(alarmId);
+    }
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/service/ServiceGateway.java b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceGateway.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8b338f4fb8f12bd77749529a92f920c525af5b7
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceGateway.java
@@ -0,0 +1,26 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.service;
+
+import eu.teraflow.policy.context.model.Service;
+import eu.teraflow.policy.context.model.ServiceId;
+import io.smallrye.mutiny.Uni;
+
+public interface ServiceGateway {
+
+    Uni<ServiceId> updateService(Service service);
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/service/ServiceGatewayImpl.java b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceGatewayImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..b69994ce42c81a0b52bec02b34ac5a4572bfe800
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceGatewayImpl.java
@@ -0,0 +1,50 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.service;
+
+import eu.teraflow.policy.Serializer;
+import eu.teraflow.policy.context.model.Service;
+import eu.teraflow.policy.context.model.ServiceId;
+import io.quarkus.grpc.GrpcClient;
+import io.smallrye.mutiny.Uni;
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+import service.MutinyServiceServiceGrpc.MutinyServiceServiceStub;
+
+@ApplicationScoped
+public class ServiceGatewayImpl implements ServiceGateway {
+
+    @GrpcClient("service")
+    MutinyServiceServiceStub streamingDelegateService;
+
+    private final Serializer serializer;
+
+    @Inject
+    public ServiceGatewayImpl(Serializer serializer) {
+        this.serializer = serializer;
+    }
+
+    @Override
+    public Uni<ServiceId> updateService(Service service) {
+        final var serializedService = serializer.serialize(service);
+
+        return streamingDelegateService
+                .updateService(serializedService)
+                .onItem()
+                .transform(serializer::deserialize);
+    }
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/service/ServiceService.java b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceService.java
new file mode 100644
index 0000000000000000000000000000000000000000..e6ab0871f0e5f7d8392cc6a3358b584e11911a6a
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceService.java
@@ -0,0 +1,26 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.service;
+
+import eu.teraflow.policy.context.model.Service;
+import eu.teraflow.policy.context.model.ServiceId;
+import io.smallrye.mutiny.Uni;
+
+public interface ServiceService {
+
+    Uni<ServiceId> updateService(Service service);
+}
diff --git a/src/policy/src/main/java/eu/teraflow/policy/service/ServiceServiceImpl.java b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceServiceImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..921560e0f3d1805d9307d73fa7942ceeb451fb1d
--- /dev/null
+++ b/src/policy/src/main/java/eu/teraflow/policy/service/ServiceServiceImpl.java
@@ -0,0 +1,39 @@
+/*
+* Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package eu.teraflow.policy.service;
+
+import eu.teraflow.policy.context.model.Service;
+import eu.teraflow.policy.context.model.ServiceId;
+import io.smallrye.mutiny.Uni;
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+
+@ApplicationScoped
+public class ServiceServiceImpl implements ServiceService {
+
+    private final ServiceGateway serviceGateway;
+
+    @Inject
+    public ServiceServiceImpl(ServiceGateway serviceGateway) {
+        this.serviceGateway = serviceGateway;
+    }
+
+    @Override
+    public Uni<ServiceId> updateService(Service service) {
+        return serviceGateway.updateService(service);
+    }
+}
diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
index e8c582013366c993992ff3fd504bb9b2c7578c17..40e8ce80bacc02cca70a730f0f5a618e761d597d 100644
--- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
+++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java
@@ -57,6 +57,7 @@ import eu.teraflow.policy.context.model.Device;
 import eu.teraflow.policy.context.model.DeviceConfig;
 import eu.teraflow.policy.context.model.DeviceDriverEnum;
 import eu.teraflow.policy.context.model.DeviceOperationalStatus;
+import eu.teraflow.policy.context.model.Empty;
 import eu.teraflow.policy.context.model.EndPoint;
 import eu.teraflow.policy.context.model.EndPointId;
 import eu.teraflow.policy.context.model.Event;
@@ -2317,6 +2318,35 @@ class SerializerTest {
         assertThat(numericalOperator).isEqualTo(expectedNumericalOperator);
     }
 
+    @Test
+    void shouldSerializeSubscriptionId() {
+        final var subscriptionId = "subscriptionId";
+
+        final var subscriptionIdUuid = serializer.serializeUuid(subscriptionId);
+
+        final var expectedSubscriptionId =
+                Monitoring.SubscriptionID.newBuilder().setSubsId(subscriptionIdUuid).build();
+
+        final var serializedSubscriptionId = serializer.serializeSubscriptionIdId(subscriptionId);
+
+        assertThat(serializedSubscriptionId)
+                .usingRecursiveComparison()
+                .isEqualTo(expectedSubscriptionId);
+    }
+
+    @Test
+    void shouldDeserializeSubscriptionId() {
+        final var expectedSubscriptionId = "expectedSubscriptionId";
+
+        final var serializedSubscriptionIdUuid = serializer.serializeUuid(expectedSubscriptionId);
+        final var serializedSubscriptionId =
+                Monitoring.SubscriptionID.newBuilder().setSubsId(serializedSubscriptionIdUuid).build();
+
+        final var subscriptionId = serializer.deserialize(serializedSubscriptionId);
+
+        assertThat(subscriptionId).isEqualTo(expectedSubscriptionId);
+    }
+
     @ParameterizedTest
     @MethodSource("provideNumericalOperator")
     void shouldSerializePolicyRuleConditionGivenMultipleNumericalOperators(
@@ -2367,7 +2397,7 @@ class SerializerTest {
 
     @ParameterizedTest
     @MethodSource("provideKpiValues")
-    void shouldSerializeKpiValues(KpiValue kpiValue, Monitoring.KpiValue expectedKpiValue) {
+    void shouldSerializeKpiValues(KpiValue<?> kpiValue, Monitoring.KpiValue expectedKpiValue) {
         final var serializedKpiValue = serializer.serialize(kpiValue);
 
         assertThat(serializedKpiValue).isEqualTo(expectedKpiValue);
@@ -2376,7 +2406,7 @@ class SerializerTest {
     @ParameterizedTest
     @MethodSource("provideKpiValues")
     void shouldDeserializeKpiValues(
-            KpiValue expectedKpiValue, Monitoring.KpiValue serializedKpiValue) {
+            KpiValue<?> expectedKpiValue, Monitoring.KpiValue serializedKpiValue) {
         final var kpiValue = serializer.deserialize(serializedKpiValue);
 
         assertThat(kpiValue).usingRecursiveComparison().isEqualTo(expectedKpiValue);
@@ -2556,6 +2586,32 @@ class SerializerTest {
                 .isEqualTo(expectedKpiValueRange.getKpiMaxValue());
     }
 
+    @Test
+    void shouldSerializeAlarmId() {
+        final var alarmId = "alarmId";
+
+        final var alarmIdUuid = serializer.serializeUuid(alarmId);
+
+        final var expectedAlarmId = Monitoring.AlarmID.newBuilder().setAlarmId(alarmIdUuid).build();
+
+        final var serializedAlarmId = serializer.serializeAlarmId(alarmId);
+
+        assertThat(serializedAlarmId).usingRecursiveComparison().isEqualTo(expectedAlarmId);
+    }
+
+    @Test
+    void shouldDeserializeAlarmId() {
+        final var expectedAlarmId = "expectedAlarmId";
+
+        final var serializedAlarmIdUuid = serializer.serializeUuid(expectedAlarmId);
+        final var serializedAlarmId =
+                Monitoring.AlarmID.newBuilder().setAlarmId(serializedAlarmIdUuid).build();
+
+        final var alarmId = serializer.deserialize(serializedAlarmId);
+
+        assertThat(alarmId).isEqualTo(expectedAlarmId);
+    }
+
     @Test
     void shouldSerializeAlarmDescriptor() {
         final var alarmDescription = "alarmDescription";
@@ -3091,6 +3147,84 @@ class SerializerTest {
         assertThat(kpi).usingRecursiveComparison().isEqualTo(expectedKpi);
     }
 
+    @Test
+    void shouldSerializeKpisList() {
+        final var expectedKpiIdA = "expectedKpiIdA";
+        final var expectedTimestampA = "expectedTimestampA";
+        final var expectedKpiValueA = new FloatKpiValue(643.45f);
+        final var serializedKpiIdA = serializer.serializeKpiId(expectedKpiIdA);
+        final var serializedKpiValueA = serializer.serialize(expectedKpiValueA);
+        final var kpiA = new Kpi(expectedKpiIdA, expectedTimestampA, expectedKpiValueA);
+
+        final var expectedKpiIdB = "expectedKpiIdB";
+        final var expectedTimestampB = "expectedTimestampB";
+        final var expectedKpiValueB = new IntegerKpiValue(32);
+        final var serializedKpiIdB = serializer.serializeKpiId(expectedKpiIdB);
+        final var serializedKpiValueB = serializer.serialize(expectedKpiValueB);
+        final var kpiB = new Kpi(expectedKpiIdB, expectedTimestampB, expectedKpiValueB);
+
+        final var kpis = List.of(kpiA, kpiB);
+
+        final var expectedKpiA =
+                Monitoring.Kpi.newBuilder()
+                        .setKpiId(serializedKpiIdA)
+                        .setTimestamp(expectedTimestampA)
+                        .setKpiValue(serializedKpiValueA)
+                        .build();
+
+        final var expectedKpiB =
+                Monitoring.Kpi.newBuilder()
+                        .setKpiId(serializedKpiIdB)
+                        .setTimestamp(expectedTimestampB)
+                        .setKpiValue(serializedKpiValueB)
+                        .build();
+
+        final var expectedKpis = List.of(expectedKpiA, expectedKpiB);
+
+        final var serializedKpis = serializer.serialize(kpis);
+
+        assertThat(serializedKpis).usingRecursiveComparison().isEqualTo(expectedKpis);
+    }
+
+    @Test
+    void shouldDeserializeKpisList() {
+        final var expectedKpiIdA = "expectedKpiIdA";
+        final var expectedTimestampA = "expectedTimestampA";
+        final var expectedKpiValueA = new FloatKpiValue(643.45f);
+        final var serializedKpiIdA = serializer.serializeKpiId(expectedKpiIdA);
+        final var serializedKpiValueA = serializer.serialize(expectedKpiValueA);
+        final var expectedKpiA = new Kpi(expectedKpiIdA, expectedTimestampA, expectedKpiValueA);
+
+        final var expectedKpiIdB = "expectedKpiIdB";
+        final var expectedTimestampB = "expectedTimestampB";
+        final var expectedKpiValueB = new IntegerKpiValue(32);
+        final var serializedKpiIdB = serializer.serializeKpiId(expectedKpiIdB);
+        final var serializedKpiValueB = serializer.serialize(expectedKpiValueB);
+        final var expectedKpiB = new Kpi(expectedKpiIdB, expectedTimestampB, expectedKpiValueB);
+
+        final var expectedKpis = List.of(expectedKpiA, expectedKpiB);
+
+        final var serializedKpiA =
+                Monitoring.Kpi.newBuilder()
+                        .setKpiId(serializedKpiIdA)
+                        .setTimestamp(expectedTimestampA)
+                        .setKpiValue(serializedKpiValueA)
+                        .build();
+
+        final var serializedKpiB =
+                Monitoring.Kpi.newBuilder()
+                        .setKpiId(serializedKpiIdB)
+                        .setTimestamp(expectedTimestampB)
+                        .setKpiValue(serializedKpiValueB)
+                        .build();
+
+        final var serializedKpis = List.of(serializedKpiA, serializedKpiB);
+
+        final var kpis = serializer.deserialize(serializedKpis);
+
+        assertThat(kpis).usingRecursiveComparison().isEqualTo(expectedKpis);
+    }
+
     @Test
     void shouldSerializeKpiDescriptor() {
         final var expectedKpiDescription = "expectedKpiDescription";
@@ -3581,6 +3715,27 @@ class SerializerTest {
         assertThat(device).usingRecursiveComparison().isEqualTo(expectedDevice);
     }
 
+    @Test
+    void shouldSerializeEmpty() {
+        final var empty = new Empty();
+        final var expectedEmpty = ContextOuterClass.Empty.newBuilder().build();
+
+        final var serializeEmpty = serializer.serializeEmpty(empty);
+
+        assertThat(serializeEmpty).isEqualTo(expectedEmpty);
+    }
+
+    @Test
+    void shouldDeserializeEmpty() {
+        final var expectedEmpty = new Empty();
+
+        final var serializedEmpty = serializer.serializeEmpty(expectedEmpty);
+
+        final var empty = serializer.deserializeEmpty(serializedEmpty);
+
+        assertThat(empty).usingRecursiveComparison().isEqualTo(expectedEmpty);
+    }
+
     @Test
     void shouldSerializeUuid() {
         final var expectedUuid = "uuid";
diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml
index 06068f0f5983b4ef385ee60b8a6cb5d812cab253..d70d05ad91cface20dd5ad1fc15ec1578a0767ca 100644
--- a/src/policy/target/kubernetes/kubernetes.yml
+++ b/src/policy/target/kubernetes/kubernetes.yml
@@ -3,20 +3,20 @@ apiVersion: v1
 kind: Service
 metadata:
   annotations:
-    app.quarkus.io/commit-id: 1d77cb00ae8f577885de32f01f4740f865853863
-    app.quarkus.io/build-timestamp: 2022-07-26 - 10:46:55 +0000
+    app.quarkus.io/commit-id: 4a11d9130e05e969e9370636484943e1fe2f8bd1
+    app.quarkus.io/build-timestamp: 2022-07-27 - 12:54:10 +0000
   labels:
     app.kubernetes.io/name: policyservice
     app: policyservice
   name: policyservice
 spec:
   ports:
-    - name: http
-      port: 8080
-      targetPort: 8080
     - name: grpc
       port: 6060
       targetPort: 6060
+    - name: http
+      port: 8080
+      targetPort: 8080
   selector:
     app.kubernetes.io/name: policyservice
   type: ClusterIP
@@ -25,8 +25,8 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    app.quarkus.io/commit-id: 1d77cb00ae8f577885de32f01f4740f865853863
-    app.quarkus.io/build-timestamp: 2022-07-26 - 10:46:55 +0000
+    app.quarkus.io/commit-id: 4a11d9130e05e969e9370636484943e1fe2f8bd1
+    app.quarkus.io/build-timestamp: 2022-07-27 - 12:54:10 +0000
   labels:
     app: policyservice
     app.kubernetes.io/name: policyservice
@@ -39,8 +39,8 @@ spec:
   template:
     metadata:
       annotations:
-        app.quarkus.io/commit-id: 1d77cb00ae8f577885de32f01f4740f865853863
-        app.quarkus.io/build-timestamp: 2022-07-26 - 10:46:55 +0000
+        app.quarkus.io/commit-id: 4a11d9130e05e969e9370636484943e1fe2f8bd1
+        app.quarkus.io/build-timestamp: 2022-07-27 - 12:54:10 +0000
       labels:
         app: policyservice
         app.kubernetes.io/name: policyservice
@@ -51,12 +51,12 @@ spec:
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.namespace
+            - name: CONTEXT_SERVICE_HOST
+              value: contextservice
             - name: SERVICE_SERVICE_HOST
               value: serviceservice
             - name: MONITORING_SERVICE_HOST
               value: monitoringservice
-            - name: CONTEXT_SERVICE_HOST
-              value: contextservice
           image: registry.gitlab.com/teraflow-h2020/controller/policy:0.1.0
           imagePullPolicy: Always
           livenessProbe:
@@ -71,12 +71,12 @@ spec:
             timeoutSeconds: 10
           name: policyservice
           ports:
-            - containerPort: 8080
-              name: http
-              protocol: TCP
             - containerPort: 6060
               name: grpc
               protocol: TCP
+            - containerPort: 8080
+              name: http
+              protocol: TCP
           readinessProbe:
             failureThreshold: 3
             httpGet:
diff --git a/src/tests/oeccpsc22/tests/test_functional_delete_interdomain_slice.py b/src/tests/oeccpsc22/tests/test_functional_delete_interdomain_slice.py
index 2830225be8738fb7b5e5b02f2d04007a94cbfa85..40a954868620564aef7d60c5ec0023ea0a32337b 100644
--- a/src/tests/oeccpsc22/tests/test_functional_delete_interdomain_slice.py
+++ b/src/tests/oeccpsc22/tests/test_functional_delete_interdomain_slice.py
@@ -113,6 +113,7 @@ def test_interdomain_slice_removal(
     #assert len(service_uuids) == 1  # assume a single service has been created
     #service_uuid = set(service_uuids).pop()
     #osm_wim.delete_connectivity_service(service_uuid)
+    pass
 
 
 def test_interdomain_slice_removed(
diff --git a/src/tests/ofc22/README.md b/src/tests/ofc22/README.md
deleted file mode 100644
index bfc06bf0e8a388d38ae10c73eb38694e6305d803..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services
-This functional test reproduces the live demonstration "Demonstration of Zero-touch Device and L3-VPN Service
-Management Using the TeraFlow Cloud-native SDN Controller" carried out at
-[OFC'22](https://www.ofcconference.org/en-us/home/program-speakers/demo/).
-
-## Functional test folder
-This functional test can be found in folder `./src/tests/ofc22/`. A convenience alias `./ofc22/` pointing to that folder has been defined.
-
-## Execute with real devices
-This functional test is designed to operate both with real and emulated devices.
-By default, emulated devices are used; however, if you have access to real devices, you can create/modify the files `./ofc22/tests/Objects.py` and `./ofc22/tests/Credentials.py` to point to your devices, and map to your network topology.
-Note that the default scenario assumes devices R2 and R4 are always emulated, while devices R1, R3, and O1 can be configured as emulated or real devices.
-
-__Important:__ The OpenConfigDriver, the P4Driver, and the TrandportApiDriver have to be considered as experimental. The configuration and monitoring capabilities they support are limited or partially implemented. Use them with care.
-
-## Deployment
-To run this functional test, it is assumed you have deployed a Kubernetes-based environment as described in [Wiki: Installing Kubernetes on your Linux machine](https://gitlab.com/teraflow-h2020/controller/-/wikis/Installing-Kubernetes-on-your-Linux-machine).
-
-After installing Kubernetes, you can run it to deploy the appropriate components. Feel free to adapt it your particular case following the instructions described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance).
-
-__Important:__
-- The `./ofc22/deploy_in_kubernetes.sh` assumes you have installed the appropriate development dependencies using the `install_development_dependencies.sh` script.
-- Before running the scripts in this folder, remember to update the environment variable K8S_HOSTNAME to point to the Kubernetes node you will be using as described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance).
-
-For your convenience, the configuration script `./ofc22/deploy_in_kubernetes.sh` has been already defined. The script will take some minutes to download the dependencies, build the micro-services, deploy them, and leave them ready for operation. The deployment will finish with a report of the items that have been created.
-
-## Access to the WebUI and Dashboard
-When the deployment completes, you can connect to the TeraFlow OS WebUI and Dashboards as described in [Wiki: Using the WebUI](https://gitlab.com/teraflow-h2020/controller/-/wikis/Using-the-WebUI), or directly navigating to `http://[your-node-ip]:30800` for the WebUI and `http://[your-node-ip]:30300` for the Grafana Dashboard.
-
-Notes:
-- the default credentials for the Grafana Dashboiard is user/pass: `admin`/`admin123+`.
-- in Grafana, you can find the "L3-Monitorng" in the "Starred dashboards" section.
-
-## Test execution
-To execute this functional test, four main steps needs to be carried out:
-1. Device bootstrapping
-2. L3VPN Service creation
-3. L3VPN Service removal
-4. Cleanup
-
-Upon the execution of each test progresses, a report will be generated indicating PASSED / FAILED / SKIPPED. If there is some error during the execution, you should see a detailed report on the error. See the troubleshooting section in that case.
-
-Feel free to check the logs of the different components using the appropriate `ofc22/show_logs_[component].sh` scripts after you execute each step.
-
-### 1. Device bootstrapping
-
-This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The expected results are:
-- The devices to be incorporated into the Topology.
-- The devices to be pre-configured and initialized as ENABLED by the Automation component.
-- The monitoring for the device ports (named as endpoints in TeraFlow OS) to be activated and data collection to automatically start.
-- The links to be added to the topology.
-
-To run this step, execute the following script:
-`./ofc22/run_test_01_bootstrap.sh`
-
-When the script finishes, check in the Grafana L3-Monitoring Dashboard and you should see the monitoring data being plotted and updated every 5 seconds (by default). Given that there is no service configured, you should see a 0-valued flat plot.
-
-In the WebUI, select the "admin" Context. In the "Devices" tab you should see that 5 different emulated devices have been created and activated: 4 packet routers, and 1 optical line system controller. Besides, in the "Services" tab you should see that there is no service created. Note here that the emulated devices produce synthetic randomly-generated data and do not care about the services configured.
-
-### 2. L3VPN Service creation
-
-This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance.
-
-To run this step, execute the following script:
-`./ofc22/run_test_02_create_service.sh`
-
-When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured, you should see the plots with the monitored data for the device. By default, device R1-INF is selected.
-
-### 3. L3VPN Service removal
-
-This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock OSM instance.
-
-To run this step, execute the following script:
-`./ofc22/run_test_03_delete_service.sh`
-
-When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed. Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again.
-
-### 4. Cleanup
-
-This last step just performs a cleanup of the scenario removing all the TeraFlow OS entities for completeness.
-
-To run this step, execute the following script:
-`./ofc22/run_test_04_cleanup.sh`
-
-When the script finishes, check the WebUI "Devices" tab, you should see that the devices have been removed. Besides, in the "Services" tab you can see that the "admin" Context has no services given that that context has been removed.
-
-## Troubleshooting
-
-Different scripts are provided to help in troubleshooting issues in the execution of the test. These scripts are:
-- `./ofc22/show_deployment.sh`: this script reports the items belonging to this deployment. Use it to validate that all the pods, deployments and replica sets are ready and have a state of "running"; and the services are deployed and have appropriate IP addresses and ports.
-- `ofc22/show_logs_automation.sh`: this script reports the logs for the automation component.
-- `ofc22/show_logs_compute.sh`: this script reports the logs for the compute component.
-- `ofc22/show_logs_context.sh`: this script reports the logs for the context component.
-- `ofc22/show_logs_device.sh`: this script reports the logs for the device component.
-- `ofc22/show_logs_monitoring.sh`: this script reports the logs for the monitoring component.
-- `ofc22/show_logs_service.sh`: this script reports the logs for the service component.
-- `ofc22/show_logs_webui.sh`: this script reports the logs for the webui component.
diff --git a/src/tests/ofc22/deploy_in_kubernetes.sh b/src/tests/ofc22/deploy_in_kubernetes.sh
deleted file mode 100755
index 1b725e5d629831d3c80be8ce8a09925e4bcc9c8e..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/deploy_in_kubernetes.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# OFC 22 deployment settings
-
-export REGISTRY_IMAGE=""
-export COMPONENTS="context device service compute webui automation monitoring"
-export IMAGE_TAG="ofc22"
-export K8S_NAMESPACE="ofc22"
-export K8S_HOSTNAME="kubernetes-master"
-export EXTRA_MANIFESTS="./ofc22/expose_services.yaml"
-export GRAFANA_PASSWORD="admin123+"
-
-./deploy_in_kubernetes.sh
diff --git a/src/tests/ofc22/expose_services.yaml b/src/tests/ofc22/expose_services.yaml
deleted file mode 100644
index d514383615e7b9dca20f22dbb6ef3438457953cc..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/expose_services.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: contextservice-public
-  labels:
-    app: contextservice
-spec:
-  type: NodePort
-  selector:
-    app: contextservice
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 1010
-    targetPort: 1010
-    nodePort: 30101
-  - name: redis
-    protocol: TCP
-    port: 6379
-    targetPort: 6379
-    nodePort: 30637
-  - name: http
-    protocol: TCP
-    port: 8080
-    targetPort: 8080
-    nodePort: 31808
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: deviceservice-public
-  labels:
-    app: deviceservice
-spec:
-  type: NodePort
-  selector:
-    app: deviceservice
-  ports:
-  - name: grpc
-    protocol: TCP
-    port: 2020
-    targetPort: 2020
-    nodePort: 30202
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: monitoringservice-public
-  labels:
-    app: monitoringservice
-spec:
-  type: NodePort
-  selector:
-    app: monitoringservice
-  ports:
-  - name: influx
-    protocol: TCP
-    port: 8086
-    targetPort: 8086
-    nodePort: 30886
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: computeservice-public
-spec:
-  type: NodePort
-  selector:
-    app: computeservice
-  ports:
-  - name: http
-    protocol: TCP
-    port: 8080
-    targetPort: 8080
-    nodePort: 30808
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: webuiservice-public
-  labels:
-    app: webuiservice
-spec:
-  type: NodePort
-  selector:
-    app: webuiservice
-  ports:
-  - name: http
-    protocol: TCP
-    port: 8004
-    targetPort: 8004
-    nodePort: 30800
-  - name: grafana
-    protocol: TCP
-    port: 3000
-    targetPort: 3000
-    nodePort: 30300
diff --git a/src/tests/ofc22/redeploy_webui.sh b/src/tests/ofc22/redeploy_webui.sh
deleted file mode 100755
index 975f84a9d3b75e00a809acd336d844973cb26897..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/redeploy_webui.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-export COMPONENT="webui"
-export IMAGE_TAG="ofc22"
-export K8S_NAMESPACE="ofc22"
-export K8S_HOSTNAME="kubernetes-master"
-export GRAFANA_PASSWORD="admin123+"
-
-# Constants
-TMP_FOLDER="./tmp"
-
-# Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
-mkdir -p $TMP_MANIFESTS_FOLDER
-TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
-mkdir -p $TMP_LOGS_FOLDER
-
-echo "Processing '$COMPONENT' component..."
-IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
-
-echo "  Building Docker image..."
-BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
-docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG"
-
-sleep 1
-
-echo "  Deploying '$COMPONENT' component to Kubernetes..."
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT}service
-kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT}service
-printf "\n"
-
-sleep 1
-
-echo "Waiting for '$COMPONENT' component..."
-kubectl wait --namespace $K8S_NAMESPACE --for='condition=available' --timeout=300s deployment/${COMPONENT}service
-printf "\n"
-
-echo "Configuring DataStores and Dashboards..."
-./configure_dashboards.sh
-printf "\n\n"
-
-echo "Reporting Deployment..."
-kubectl --namespace $K8S_NAMESPACE get all
-printf "\n"
-
-echo "Done!"
diff --git a/src/tests/ofc22/run_test_01_bootstrap.sh b/src/tests/ofc22/run_test_01_bootstrap.sh
index 634fed02dd71464c6878f0c96fb67cf3067148e2..be30b15189786de3fd2f593a1584c73890e9e4fe 100755
--- a/src/tests/ofc22/run_test_01_bootstrap.sh
+++ b/src/tests/ofc22/run_test_01_bootstrap.sh
@@ -13,39 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-PROJECTDIR=`pwd`
-
-cd $PROJECTDIR/src
-RCFILE=$PROJECTDIR/coverage/.coveragerc
-COVERAGEFILE=$PROJECTDIR/coverage/.coverage
-
-# Configure the correct folder on the .coveragerc file
-cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE
-
-# Destroy old coverage file
-rm -f $COVERAGEFILE
-
-# Set the name of the Kubernetes namespace and hostname to use.
-K8S_NAMESPACE="ofc22"
-# K8S_HOSTNAME="kubernetes-master"
-# dynamically gets the name of the K8s master node
-K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
-
-# Flush Context database
-kubectl --namespace $K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
-
-export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
-export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
-export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
-# Run functional test and analyze coverage of code at same time
-
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-    tests/ofc22/tests/test_functional_bootstrap.py
+pytest --verbose src/tests/ofc22/tests/test_functional_bootstrap.py
diff --git a/src/tests/ofc22/run_test_02_create_service.sh b/src/tests/ofc22/run_test_02_create_service.sh
index 5498f91f2a3186ca694443dfc047760464ad2663..20fc3db65dd57ae8697253443050b1767d9b77a1 100755
--- a/src/tests/ofc22/run_test_02_create_service.sh
+++ b/src/tests/ofc22/run_test_02_create_service.sh
@@ -13,29 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-PROJECTDIR=`pwd`
-
-cd $PROJECTDIR/src
-RCFILE=$PROJECTDIR/coverage/.coveragerc
-COVERAGEFILE=$PROJECTDIR/coverage/.coverage
-
-# Set the name of the Kubernetes namespace and hostname to use.
-K8S_NAMESPACE="ofc22"
-# dynamically gets the name of the K8s master node
-K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
-
-export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
-export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
-export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
-# Run functional test and analyze coverage of code at same time
-
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-    tests/ofc22/tests/test_functional_create_service.py
+pytest --verbose src/tests/ofc22/tests/test_functional_create_service.py
diff --git a/src/tests/ofc22/run_test_03_delete_service.sh b/src/tests/ofc22/run_test_03_delete_service.sh
index 7a8e3a662610042fc3aaf603f8944e48d5573dd2..98073013d84e9d64e56dd9022ac163b6321ce389 100755
--- a/src/tests/ofc22/run_test_03_delete_service.sh
+++ b/src/tests/ofc22/run_test_03_delete_service.sh
@@ -13,29 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-PROJECTDIR=`pwd`
-
-cd $PROJECTDIR/src
-RCFILE=$PROJECTDIR/coverage/.coveragerc
-COVERAGEFILE=$PROJECTDIR/coverage/.coverage
-
-# Set the name of the Kubernetes namespace and hostname to use.
-K8S_NAMESPACE="ofc22"
-# dynamically gets the name of the K8s master node
-K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
-
-export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
-export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
-export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
-# Run functional test and analyze coverage of code at same time
-
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-    tests/ofc22/tests/test_functional_delete_service.py
+pytest --verbose src/tests/ofc22/tests/test_functional_delete_service.py
diff --git a/src/tests/ofc22/run_test_04_cleanup.sh b/src/tests/ofc22/run_test_04_cleanup.sh
index 5995a804f84db1d18f7e1ed18676bc575af7e80b..f7c0aad8da0b0446d188ec1fad3f0fc0e7dc2b4a 100755
--- a/src/tests/ofc22/run_test_04_cleanup.sh
+++ b/src/tests/ofc22/run_test_04_cleanup.sh
@@ -13,29 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-PROJECTDIR=`pwd`
-
-cd $PROJECTDIR/src
-RCFILE=$PROJECTDIR/coverage/.coveragerc
-COVERAGEFILE=$PROJECTDIR/coverage/.coverage
-
-# Set the name of the Kubernetes namespace and hostname to use.
-K8S_NAMESPACE="ofc22"
-# dynamically gets the name of the K8s master node
-K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
-
-export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
-export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
-export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
-
-# Useful flags for pytest:
-#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
-
-# Run functional test and analyze coverage of code at same time
-
-coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-    tests/ofc22/tests/test_functional_cleanup.py
+pytest --verbose src/tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/run_tests_and_coverage.sh b/src/tests/ofc22/run_tests_and_coverage.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fa5026db2310c8753d8e4476707ce46a38ecb0f2
--- /dev/null
+++ b/src/tests/ofc22/run_tests_and_coverage.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+COVERAGEFILE=$PROJECTDIR/coverage/.coverage
+
+# Configure the correct folder on the .coveragerc file
+cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE
+
+# Destroy old coverage file
+rm -f $COVERAGEFILE
+
+# Force a flush of Context database
+kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
+
+# Run functional tests and analyze code coverage at the same time
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ofc22/tests/test_functional_bootstrap.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ofc22/tests/test_functional_create_service.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ofc22/tests/test_functional_delete_service.py
+
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    tests/ofc22/tests/test_functional_cleanup.py
diff --git a/src/tests/ofc22/show_deploy.sh b/src/tests/ofc22/show_deploy.sh
deleted file mode 100755
index 58fce79e32819478627c87b8a5fb8ea7701db2d7..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_deploy.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE get all
diff --git a/src/tests/ofc22/show_logs_automation.sh b/src/tests/ofc22/show_logs_automation.sh
deleted file mode 100755
index 778cfaa942bcb36a81ccd571afe0f024c32d373d..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_logs_automation.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE logs deployment/automationservice
diff --git a/src/tests/ofc22/show_logs_compute.sh b/src/tests/ofc22/show_logs_compute.sh
deleted file mode 100755
index cafde447ace44cc71fc75d27af2a50100f155681..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_logs_compute.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE logs deployment/computeservice
diff --git a/src/tests/ofc22/show_logs_context.sh b/src/tests/ofc22/show_logs_context.sh
deleted file mode 100755
index 6d5b77fa9e0565e6df66856829644f31f55a4197..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_logs_context.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE logs deployment/contextservice -c server
diff --git a/src/tests/ofc22/show_logs_device.sh b/src/tests/ofc22/show_logs_device.sh
deleted file mode 100755
index 9d976755a959dd8674a5cfe4fffb7104c27e8521..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_logs_device.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE logs deployment/deviceservice
diff --git a/src/tests/ofc22/show_logs_monitoring.sh b/src/tests/ofc22/show_logs_monitoring.sh
deleted file mode 100755
index 3dd7522fa57eca242225d29571956923075e14d8..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_logs_monitoring.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE logs deployment/monitoringservice -c server
diff --git a/src/tests/ofc22/show_logs_service.sh b/src/tests/ofc22/show_logs_service.sh
deleted file mode 100755
index 2589a3cfe16f4383904c342366f3efc01c42d470..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_logs_service.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE logs deployment/serviceservice
diff --git a/src/tests/ofc22/show_logs_webui.sh b/src/tests/ofc22/show_logs_webui.sh
deleted file mode 100755
index ecf4f3f6fc22dd71eef2a6db2b5ac18f54ccca35..0000000000000000000000000000000000000000
--- a/src/tests/ofc22/show_logs_webui.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-K8S_NAMESPACE="ofc22"
-kubectl --namespace $K8S_NAMESPACE logs deployment/webuiservice -c server
diff --git a/src/tests/ofc22/tests/Objects.py b/src/tests/ofc22/tests/Objects.py
index fd48210fa4c4ed507e090c8d225aa3755805446f..bda08d7761ab3ad794246e6f94932c147a787993 100644
--- a/src/tests/ofc22/tests/Objects.py
+++ b/src/tests/ofc22/tests/Objects.py
@@ -86,7 +86,7 @@ if not USE_REAL_DEVICES:
     json_device_packetrouter_disabled = json_device_emulated_packet_router_disabled
     json_device_tapi_disabled         = json_device_emulated_tapi_disabled
 
-DEVICE_R1_UUID          = 'R1-INF'
+DEVICE_R1_UUID          = 'R1-EMU'
 DEVICE_R1_TIMEOUT       = 120
 DEVICE_R1_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
 DEVICE_R1_ID            = json_device_id(DEVICE_R1_UUID)
@@ -113,7 +113,7 @@ ENDPOINT_ID_R2_13_1_2   = DEVICE_R2_ENDPOINT_IDS[1]
 DEVICE_R2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R2_ENDPOINT_DEFS)
 
 
-DEVICE_R3_UUID          = 'R3-INF'
+DEVICE_R3_UUID          = 'R3-EMU'
 DEVICE_R3_TIMEOUT       = 120
 DEVICE_R3_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)]
 DEVICE_R3_ID            = json_device_id(DEVICE_R3_UUID)
@@ -186,24 +186,15 @@ def compose_service_endpoint_id(endpoint_id):
     endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
     return ':'.join([device_uuid, endpoint_uuid])
 
-def compose_bearer(endpoint_id, router_id, route_distinguisher):
-    device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
-    endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
-    return '#'.join([device_uuid, endpoint_uuid, router_id, route_distinguisher])
-
-WIM_SEP_R1_ID          = compose_service_endpoint_id(ENDPOINT_ID_R1_13_1_2)
-WIM_SEP_R1_ROUTER_ID   = '10.10.10.1'
-WIM_SEP_R1_ROUTER_DIST = '65000:111'
-WIM_SEP_R1_SITE_ID     = '1'
-WIM_SEP_R1_BEARER      = compose_bearer(ENDPOINT_ID_R1_13_1_2, WIM_SEP_R1_ROUTER_ID, WIM_SEP_R1_ROUTER_DIST)
-WIM_SRV_R1_VLAN_ID     = 400
-
-WIM_SEP_R3_ID          = compose_service_endpoint_id(ENDPOINT_ID_R3_13_1_2)
-WIM_SEP_R3_ROUTER_ID   = '20.20.20.1'
-WIM_SEP_R3_ROUTER_DIST = '65000:222'
-WIM_SEP_R3_SITE_ID     = '2'
-WIM_SEP_R3_BEARER      = compose_bearer(ENDPOINT_ID_R3_13_1_2, WIM_SEP_R3_ROUTER_ID, WIM_SEP_R3_ROUTER_DIST)
-WIM_SRV_R3_VLAN_ID     = 500
+WIM_SEP_R1_ID      = compose_service_endpoint_id(ENDPOINT_ID_R1_13_1_2)
+WIM_SEP_R1_SITE_ID = '1'
+WIM_SEP_R1_BEARER  = WIM_SEP_R1_ID
+WIM_SRV_R1_VLAN_ID = 400
+
+WIM_SEP_R3_ID      = compose_service_endpoint_id(ENDPOINT_ID_R3_13_1_2)
+WIM_SEP_R3_SITE_ID = '2'
+WIM_SEP_R3_BEARER  = WIM_SEP_R3_ID
+WIM_SRV_R3_VLAN_ID = 500
 
 WIM_USERNAME = 'admin'
 WIM_PASSWORD = 'admin'
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 1901948676c7adbe6e926eda0d42d796e23dcca3..85d3aeeb7c6f23ab4123412173cdfda4d27b23a4 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -118,6 +118,9 @@ def topology():
 def about():
     return render_template('main/about.html')
 
+@main.get('/debug')
+def debug():
+    return render_template('main/debug.html')
 
 @main.get('/resetsession')
 def reset_session():
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index a24edaa541a09c7a22f52d9bf3e705c62c6ef1c6..d314acb3d5cbe607e82474be7e66302f3d620d6a 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -75,10 +75,15 @@
                 <a class="nav-link" href="{{ url_for('service.home') }}">Service</a>
                 {% endif %}
               </li>
-              
+
+              <li class="nav-item">
+                <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
+              </li>
+
               <li class="nav-item">
-                <a class="nav-link" href="#" id="grafana_link" target="grafana">Grafana</a>
+                <a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
               </li>
+
               <!-- <li class="nav-item">
                 <a class="nav-link" href="#">Context</a>
               </li>
@@ -143,9 +148,9 @@
     <!-- Option 1: Bootstrap Bundle with Popper -->
     <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.2/dist/js/bootstrap.bundle.min.js" integrity="sha384-kQtW33rZJAHjgefvhyyzcGF3C5TFyBQBA13V1RKPf4uH+bwyzQxZ6CmMZHmNBEfJ" crossorigin="anonymous"></script>
     <!-- <script src="{{ url_for('static', filename='site.js') }}"/> -->
-    <script>
+    <!-- <script>
       document.getElementById("grafana_link").href = window.location.protocol + "//" + window.location.hostname + ":30300"
-    </script>
+    </script> -->
     <!-- Option 2: Separate Popper and Bootstrap JS -->
     <!--
     <script src="https://cdn.jsdelivr.net/npm/@popperjs/core@2.10.2/dist/umd/popper.min.js" integrity="sha384-7+zCNj/IqJ95wo16oMtfsKbZ9ccEh31eOz1HGyDuCQ6wgnyJNSYdrPa03rtR1zdB" crossorigin="anonymous"></script>
diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html
new file mode 100644
index 0000000000000000000000000000000000000000..d065cc49d7262940beedd5eb9aa44a2ab890a07e
--- /dev/null
+++ b/src/webui/service/templates/main/debug.html
@@ -0,0 +1,36 @@
+<!--
+ Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Debug</h1>
+
+    <h3>Dump ContextDB:</h3>
+    <ul>
+        <li>
+            <a class="nav-link" href="/context/api/dump/html" id="context_html_link" target="context_html">
+                as HTML
+            </a>
+        </li>
+        <li>
+            <a class="nav-link" href="/context/api/dump/text" id="context_text_link" target="context_text">
+                as Text
+            </a>
+        </li>
+    </ul>
+
+{% endblock %}
diff --git a/tutorial/1-0-deployment.md b/tutorial/1-0-deployment.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d56808daf6df8ed8ed5ba1f6133858199d19994
--- /dev/null
+++ b/tutorial/1-0-deployment.md
@@ -0,0 +1,12 @@
+# 1. Deployment Guide
+
+This section walks you through the process of deploying TeraFlowSDN on top of a Virtual Machine (VM) running MicroK8s
+Kubernetes platform. The guide includes the details on configuring and installing the VM, installing and configuring
+MicroK8s, and deploying and reporting the status of the TeraFlowSDN controller.
+
+## Table of Contents:
+- [1.1. Create VM for the TeraFlowSDN controller](./1-1-create-vm.md)
+- [1.2. Install MicroK8s Kubernetes platform](./1-2-install-microk8s.md)
+- [1.3. Deploy TeraFlowSDN over MicroK8s](./1-3-deploy-tfs.md)
+- [1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md)
+- [1.5. Show Deployment and Log per Component](./1-5-deploy-logs-troubleshooting.md)
diff --git a/tutorial/1-1-1-create-vm-oracle-virtualbox.md b/tutorial/1-1-1-create-vm-oracle-virtualbox.md
new file mode 100644
index 0000000000000000000000000000000000000000..c53601e145d4ad0ca41c55803c27d55309070231
--- /dev/null
+++ b/tutorial/1-1-1-create-vm-oracle-virtualbox.md
@@ -0,0 +1,90 @@
+## 1.1.1. Oracle VirtualBox
+
+### 1.1.1.1. Create a NAT Network in VirtualBox
+In "Oracle VM VirtualBox Manager", Menu "File > Preferences... > Network", create a NAT network with the following
+specifications:
+
+|Name       |CIDR       |DHCP    |IPv6    |
+|-----------|-----------|--------|--------|
+|TFS-NAT-Net|10.0.2.0/24|Disabled|Disabled|
+
+Within the newly created "TFS-NAT-Net" NAT network, configure the following IPv4 forwarding rules:
+
+|Name|Protocol|Host IP  |Host Port|Guest IP |Guest Port|
+|----|--------|---------|---------|---------|----------|
+|SSH |TCP     |127.0.0.1|2200     |10.0.2.10|22        |
+|HTTP|TCP     |127.0.0.1|8080     |10.0.2.10|80        |
+
+__Note__: IP address 10.0.2.10 is the one that will be assigned to the VM.
+
+
+### 1.1.1.2. Create VM in VirtualBox:
+
+- Name: TFS-VM
+- Type/Version: Linux / Ubuntu (64-bit)
+- CPU (*): 4 vCPUs @ 100% execution capacity
+- RAM: 8 GB
+- Disk: 40 GB, Virtual Disk Image (VDI), Dynamically allocated
+- Optical Drive ISO Image: "ubuntu-20.04.4-live-server-amd64.iso"
+  (from [Ubuntu Server 20.04 LTS](https://releases.ubuntu.com/20.04/))
+- Network Adapter 1 (*): enabled, attached to NAT Network "TFS-NAT-Net"
+- Minor adjustments (*):
+  - Audio: disabled
+  - Boot order: disable "Floppy"
+
+__Note__: (*) settings to be edited after the VM is created.
+
+### 1.1.1.3. Install Ubuntu 20.04 LTS Operating System
+In "Oracle VM VirtualBox Manager", start the VM in normal mode, and follow the installation procedure. Below we provide
+some installation guidelines:
+- Installation Language: English
+- Autodetect your keyboard
+- Configure static network specifications:
+
+|Interface|IPv4 Method|Subnet     |Address  |Gateway |Name servers   |Search domains|
+|---------|-----------|-----------|---------|--------|---------------|--------------|
+|enp0s3   |Manual     |10.0.2.0/24|10.0.2.10|10.0.2.1|8.8.8.8,8.8.4.4|<empty>       |
+
+- Leave proxy and mirror addresses as they are
+- Let the installer self-upgrade (if asked). At the time of writing this walkthrough, the version of the installer is 22.06.1. Anyway, Ubuntu 20.04 LTS OS will be installed.
+- Use an entire disk for the installation
+  - Disable setup of the disk as LVM group
+  - Double check that NO swap space is allocated in the partition table. Kubernetes does not work properly with SWAP.
+- Configure your user and system names:
+  - User name: TeraFlowSDN
+  - Server's name: tfs-vm
+  - Username: tfs
+  - Password: tfs123
+- Install Open SSH Server
+  - Import SSH keys, if any.
+- Featured Server Snaps
+  - Do not install featured server snaps. It will be done manually later to illustrate how to uninstall and reinstall
+    them in case of trouble.
+- Let the system install and upgrade the packages.
+  - This operation might take some minutes depending on how old is the Optical Drive ISO image you use and your
+    Internet connection speed.
+- Restart the VM when the installation is completed.
+
+## 1.1.1.4. Upgrade the Ubuntu distribution
+```bash
+sudo apt-get update -y
+sudo apt-get dist-upgrade -y
+```
+
+## 1.1.1.5. Install VirtualBox Guest Additions
+On VirtualBox Manager, open the VM main screen. If you are running the VM in headless mode, right click over the VM in
+the VirtualBox Manager window and click "Show". If a dialog informing about how to leave the interface of the VM is
+shown, confirm by pressing the "Switch" button. The interface of the VM should appear.
+
+Click menu "Device > Insert Guest Additions CD image..."
+
+On the VM terminal, type:
+```bash
+sudo apt-get install -y linux-headers-$(uname -r) build-essential dkms
+  # This command might take some minutes depending on your VM specs and your Internet access speed.
+sudo mount /dev/cdrom /mnt/
+cd /mnt/
+sudo ./VBoxLinuxAdditions.run
+  # This command might take some minutes depending on your VM specs.
+sudo reboot
+```
diff --git a/tutorial/1-1-2-create-vm-vmware-fusion.md b/tutorial/1-1-2-create-vm-vmware-fusion.md
new file mode 100644
index 0000000000000000000000000000000000000000..7147c24af8a2cc182c8e6987387254218a3f9db9
--- /dev/null
+++ b/tutorial/1-1-2-create-vm-vmware-fusion.md
@@ -0,0 +1,54 @@
+## 1.1.2. VMWare Fusion
+
+### 1.1.2.1. Create VM in VMWare Fusion:
+
+In "VMWare Fusion" manager, create a new network from the "Settings/Network" menu.
+
+- Unlock to make changes
+- Press the + icon and create a new network
+- Change the name to TFS-NAT-Net
+- Check "Allow virtual machines on this network to connect to external network (NAT)"
+- Do not check "Enable IPv6"
+- Add port forwarding for HTTP and SSH
+- Uncheck "Provide address on this network via DHCP"
+
+Create a new VM using an Ubuntu 20.04 ISO:
+
+- Display Name: TeraFlowSDN
+- Username: tfs
+- Password: tfs123
+
+On the next screen press "Customize Settings", save the VM and in "Settings" change:
+- Change to use 4 CPUs
+- Change to access 8 GB of RAM
+- Change disk to size 40 GB
+- Change the network interface to use the previously created TFS-NAT-Net
+
+Run the VM to start the installation.
+
+### 1.1.2.2. Install Ubuntu 20.04 LTS Operating System
+
+The installation will be automatic, without any configuration required.
+
+- Configure the guest ip, gateway and DNS:
+
+  Using the Network Settings for the wired connection, set the IP to 10.0.2.10,
+  the mask to 255.255.255.0, the gateway to 10.0.2.2 and the DNS to 10.0.2.2.
+
+- Disable and remove swap file:
+
+  $ sudo swapoff -a
+  $ sudo rm /swapfile
+
+  Then you can remove or comment the /swapfile entry in /etc/fstab
+
+- Install Open SSH Server
+  - Import SSH keys, if any.
+
+- Restart the VM when the installation is completed.
+
+### 1.1.2.3. Upgrade the Ubuntu distribution
+```bash
+sudo apt-get update -y
+sudo apt-get dist-upgrade -y
+```
diff --git a/tutorial/1-1-create-vm.md b/tutorial/1-1-create-vm.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce74e6dc6f8df07d5f7cf42d979a7b54d61bc9a6
--- /dev/null
+++ b/tutorial/1-1-create-vm.md
@@ -0,0 +1,13 @@
+# 1.1. Create VM for the TeraFlowSDN controller
+
+In this section, we install a VM to be used as the deployment, execution, and development environment for the ETSI
+TeraFlowSDN controller. If you already have a remote physical server fitting the requirements specified in this section,
+feel free to use it instead of deploying a local VM. Other virtualization environments can also be used; in that case,
+you will need to adapt these instructions to your particular case.
+
+Different Hypervisors are considered for that. Check the table of contents for available options. If you want to
+contribute with other Hypervisors, [contact](./README.md#contact) the TFS team through Slack.
+
+## Table of Content:
+- [1.1.1. Oracle VirtualBox](./1-1-1-create-vm-oracle-virtualbox.md)
+- [1.1.2. VMWare Fusion](./1-1-2-create-vm-vmware-fusion.md)
diff --git a/tutorial/1-2-install-microk8s.md b/tutorial/1-2-install-microk8s.md
new file mode 100644
index 0000000000000000000000000000000000000000..09e0b41a36a9b6c88883377be6c0737157f7afba
--- /dev/null
+++ b/tutorial/1-2-install-microk8s.md
@@ -0,0 +1,121 @@
+# 1.2. Install MicroK8s Kubernetes platform
+
+This section describes how to deploy the MicroK8s Kubernetes platform and configure it to be used with ETSI TeraFlowSDN
+controller. Besides, Docker is installed to build docker images for the ETSI TeraFlowSDN controller.
+
+The steps described in this section might take some minutes depending on your internet connection speed and the
+resources assigned to your VM, or the specifications of your physical server.
+
+
+## 1.2.1. Upgrade the Ubuntu distribution
+Skip this step if you already did it during the creation of the VM.
+```bash
+sudo apt-get update -y
+sudo apt-get dist-upgrade -y
+```
+
+
+## 1.2.2. Install prerequisites
+```bash
+sudo apt-get install -y ca-certificates curl gnupg lsb-release snapd jq
+```
+
+
+## 1.2.3. Install Docker CE
+Install Docker CE
+```bash
+sudo apt-get install -y docker.io
+```
+
+Add key "insecure-registries" with the private repository to the daemon configuration. It is done in two commands since
+sometimes read from and write to same file might cause trouble.
+
+```bash
+if [ -s /etc/docker/daemon.json ]; then cat /etc/docker/daemon.json; else echo '{}'; fi \
+    | jq 'if has("insecure-registries") then . else .+ {"insecure-registries": []} end' -- \
+    | jq '."insecure-registries" |= (.+ ["localhost:32000"] | unique)' -- \
+    | tee tmp.daemon.json
+sudo mv tmp.daemon.json /etc/docker/daemon.json
+sudo chown root:root /etc/docker/daemon.json
+sudo chmod 600 /etc/docker/daemon.json
+```
+
+Restart the Docker daemon
+```bash
+sudo systemctl restart docker
+```
+
+
+## 1.2.4. Install MicroK8s
+Ref: https://ubuntu.com/tutorials/install-a-local-kubernetes-with-microk8s
+Ref: https://microk8s.io/#install-microk8s
+
+```bash
+# Install MicroK8s
+sudo snap install microk8s --classic --channel=1.24/stable
+
+# Create alias for command "microk8s.kubectl" to be usable as "kubectl"
+sudo snap alias microk8s.kubectl kubectl
+
+# Verify status of ufw firewall
+sudo ufw status
+
+# If ufw is active, install following rules to enable access pod-to-pod and pod-to-internet
+sudo ufw allow in on cni0 && sudo ufw allow out on cni0
+sudo ufw default allow routed
+```
+
+
+## 1.2.5. Add user to the docker and microk8s groups
+```bash
+sudo usermod -a -G docker $USER
+sudo usermod -a -G microk8s $USER
+sudo chown -f -R $USER ~/.kube
+sudo reboot
+```
+
+## 1.2.6. Check status of Kubernetes
+```bash
+microk8s.status --wait-ready
+```
+
+
+## 1.2.7. Check all resources in Kubernetes
+```bash
+microk8s.kubectl get all --all-namespaces
+```
+
+
+## 1.2.8. Enable addons
+The Addons enabled are:
+- `dns`: enables resolving the pods and services by name
+- `hostpath-storage`: enables providing storage for the pods (required by `registry`)
+- `ingress`: deploys an ingress controller to expose the microservices outside Kubernetes
+- `registry`: deploys a private registry for the TFS controller images
+
+```bash
+microk8s.enable dns hostpath-storage ingress registry
+```
+
+__Note__: enabling some of the addons might take a few minutes.
+          [Check status](./1-2-install-microk8s.md#126-check-status-of-kubernetes) periodically until all addons are
+          shown as enabled. Then [Check resources](./1-2-install-microk8s.md#127-check-all-resources-in-kubernetes)
+          periodically until all pods are Ready and Running.
+
+
+## 1.2.9. Stop, Restart, and Redeploy
+Find below some additional commands you might need while you work with MicroK8s:
+```bash
+microk8s.stop  # stop MicroK8s cluster (for instance, before power off your computer)
+microk8s.start # start MicroK8s cluster
+microk8s.reset # reset infrastructure to a clean state
+```
+
+If the above commands do not work to recover the MicroK8s cluster, you can redeploy it.
+First remove the current deployment as follows:
+```bash
+sudo snap remove microk8s
+sudo apt-get remove --purge docker.io
+```
+
+Then, redeploy as it is described in this section.
diff --git a/tutorial/1-3-deploy-tfs.md b/tutorial/1-3-deploy-tfs.md
new file mode 100644
index 0000000000000000000000000000000000000000..07c79d7ab34f12b9042a38489752b28bd4fd474e
--- /dev/null
+++ b/tutorial/1-3-deploy-tfs.md
@@ -0,0 +1,94 @@
+# 1.3. Deploy TeraFlowSDN over MicroK8s
+
+This section describes how to deploy TeraFlowSDN controller on top of MicroK8s using the environment configured in the
+previous sections.
+
+
+## 1.3.1. Install prerequisites
+```bash
+sudo apt-get install -y git curl jq
+```
+
+
+## 1.3.2. Clone the Git repository of the TeraFlowSDN controller
+__Important__: Right now, we have two repositories hosting the code of TeraFlowSDN: GitLab.com and ETSI owned GitLab
+               repository. Nowadays, only GitLab.com repository accepts code contributions that are periodically
+               mirrored to ETSI labs. In the near future, we plan to swap the repository roles and new contributions
+               will be accepted only at ETSI labs, while GitLab.com will probably be kept as a mirror of ETSI. If you
+               plan to contribute code to the TeraFlowSDN controller, by now, clone from GitLab.com. We will update the
+               tutorial as soon as roles of repositories are swapped.
+
+Clone from GitLab (if you want to contribute code to TeraFlowSDN):
+```bash
+mkdir ~/tfs-ctrl
+git clone https://gitlab.com/teraflow-h2020/controller.git ~/tfs-ctrl
+```
+
+Clone from ETSI owned GitLab (if you do not plan to contribute code):
+```bash
+mkdir ~/tfs-ctrl
+git clone https://labs.etsi.org/rep/tfs/controller.git ~/tfs-ctrl
+```
+
+
+## 1.3.3. Checkout the appropriate Git branch
+By default 'master' branch is checked out. If you want to deploy 'develop' that incorporates the most up-to-date code
+contributions and features, run the following command:
+```bash
+cd ~/tfs-ctrl
+git checkout develop
+```
+
+__Important__: During the elaboration and validation of the tutorials, you should checkout branch
+               "feat/microk8s-deployment". Otherwise, you will not have important files such as "my_deploy.sh" or
+               "deploy.sh". As soon as the tutorials are completed and approved, we will remove this note and merge the
+               "feat/microk8s-deployment" into "develop" and later into "master", and then the previous step will be
+               effective.
+
+
+## 1.3.4. Prepare a deployment script with the deployment settings
+Create a new deployment script, e.g., `my_deploy.sh`, adding the appropriate settings as follows. This script, by
+default, makes use of the private Docker registry enabled in MicroK8s, as specified in `TFS_REGISTRY_IMAGE`. It builds
+the Docker images for the subset of components defined in `TFS_COMPONENTS`, tags them with the tag defined in
+`TFS_IMAGE_TAG`, deploys them in the namespace defined in `TFS_K8S_NAMESPACE`, and (optionally) deploys the extra
+Kubernetes manifests listed in `TFS_EXTRA_MANIFESTS`. Besides, it lets you specify in `TFS_GRAFANA_PASSWORD` the
+password to be set for the Grafana `admin` user.
+
+```bash
+cd ~/tfs-ctrl
+tee my_deploy.sh >/dev/null <<EOF
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+export TFS_COMPONENTS="context device automation service compute monitoring webui"
+export TFS_IMAGE_TAG="dev"
+export TFS_K8S_NAMESPACE="tfs"
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+export TFS_GRAFANA_PASSWORD="admin123+"
+EOF
+```
+
+
+## 1.3.5. Deploy TFS controller
+First, source the deployment settings defined in the previous section. This way, you do not need to specify the
+environment variables in each and every command you execute to operate the TFS controller. Be aware to re-source the
+file if you open new terminal sessions.
+Then, run the following command to deploy TeraFlowSDN controller on top of the MicroK8s Kubernetes platform.
+
+```bash
+cd ~/tfs-ctrl
+source my_deploy.sh
+./deploy.sh
+```
+
+The script does the following steps:
+1. Build the Docker images for the components defined in `TFS_COMPONENTS`
+2. Tag the Docker images with the value of `TFS_IMAGE_TAG`
+3. Push the Docker images to the repository defined in `TFS_REGISTRY_IMAGE`
+4. Create the namespace defined in `TFS_K8S_NAMESPACE`
+5. Deploy the components defined in `TFS_COMPONENTS`
+6. Create the file `tfs_runtime_env_vars.sh` with the environment variables for the components defined in
+   `TFS_COMPONENTS` defining their local host addresses and their port numbers.
+7. Create an ingress controller listening at port 80 for HTTP connections to enable external access to the TeraFlowSDN
+   WebUI, Grafana Dashboards, Context Debug endpoints, and Compute NBI interfaces.
+8. Initialize and configure the Grafana dashboards
+9. Report a summary of the deployment (see 
+   [1.5. Show Deployment and Log per Component](./1-5-deploy-logs-troubleshooting.md))
diff --git a/tutorial/1-4-access-webui.md b/tutorial/1-4-access-webui.md
new file mode 100644
index 0000000000000000000000000000000000000000..7769669e32d6c79aa330e56fd550c923580a149d
--- /dev/null
+++ b/tutorial/1-4-access-webui.md
@@ -0,0 +1,18 @@
+# 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards
+
+This section describes how to get access to the TeraFlowSDN controller WebUI and the monitoring Grafana dashboards.
+
+
+## 1.4.1. Access the TeraFlowSDN WebUI
+If you followed the installation steps based on MicroK8s, you got an ingress controller installed that exposes on TCP
+port 80. In the creation of the VM, a forward from local TCP port 8080 to VM's TCP port 80 is configured, so the WebUIs
+and REST APIs of TeraFlowSDN should be exposed on endpoint `127.0.0.1:8080`.
+Besides, the ingress controller defines the following reverse proxy paths:
+- `http://127.0.0.1:8080/webui`: points to the WebUI of TeraFlowSDN.
+- `http://127.0.0.1:8080/grafana`: points to the Grafana dashboards. This endpoint brings access to the monitoring
+  dashboards of TeraFlowSDN. The credentials for the `admin` user are those defined in the `my_deploy.sh` script, in the
+  `TFS_GRAFANA_PASSWORD` variable.
+- `http://127.0.0.1:8080/context`: points to the REST API exposed by the TeraFlowSDN Context component. This endpoint
+  is mainly used for debugging purposes. Note that this endpoint is designed to be accessed from the WebUI.
+- `http://127.0.0.1:8080/restconf`: points to the Compute component NBI based on RestCONF. This endpoint enables
+  connecting external software, such as ETSI OpenSourceMANO NFV Orchestrator, to TeraFlowSDN.
diff --git a/tutorial/1-5-deploy-logs-troubleshooting.md b/tutorial/1-5-deploy-logs-troubleshooting.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce16a279cdf6a716d157582f7a4fba0e707f2757
--- /dev/null
+++ b/tutorial/1-5-deploy-logs-troubleshooting.md
@@ -0,0 +1,32 @@
+# 1.5. Show Deployment and Log per Component
+
+This section presents some helper scripts to inspect the status of the deployment and the logs of the components. These
+scripts are particularly helpful for troubleshooting during execution of experiments, development, and debugging.
+
+
+## 1.5.1. Report the deployment of the TFS controller
+
+The summary report given at the end of the [Deploy TFS controller](./1-3-deploy-tfs.md#135-deploy-tfs-controller)
+procedure can be generated manually at any time by running the following command. You can avoid sourcing `my_deploy.sh`
+if it has been already done.
+```bash
+cd ~/tfs-ctrl
+source my_deploy.sh
+./show_deploy.sh
+```
+
+Use this script to validate that all the pods, deployments, replica sets, ingress controller, etc. are ready and have
+the appropriate state, e.g., "running" for Pods, and the services are deployed and have appropriate IP addresses and
+port numbers.
+
+
+## 1.5.2. Report the log of a specific TFS controller component
+
+A number of scripts are pre-created in the `scripts` folder to facilitate the inspection of the component logs. For
+instance, to dump the log of the Context component, run the following command. You can avoid sourcing `my_deploy.sh`
+if it has been already done.
+
+```bash
+source my_deploy.sh
+./scripts/show_logs_context.sh
+```
diff --git a/tutorial/2-0-run-experiments.md b/tutorial/2-0-run-experiments.md
new file mode 100644
index 0000000000000000000000000000000000000000..f87d00e98a66449f5fa6d267c527565b145722b2
--- /dev/null
+++ b/tutorial/2-0-run-experiments.md
@@ -0,0 +1,12 @@
+# 2. Run Experiments Guide (WORK IN PROGRESS)
+
+This section walks you through the process of running experiments in TeraFlowSDN on top of an Oracle VirtualBox-based VM
+running MicroK8s Kubernetes platform. The guide includes the details on configuring the Python environment, some basic
+commands you might need, configuring the network topology, and executing different experiments.
+
+## Table of Content:
+- [2.1. Configure the Python environment](./2-1-python-environment.md)
+- [2.2. OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services](./2-2-ofc22.md)
+- [2.3. OECC/PSC'22 Demo (WORK IN PROGRESS)](./2-3-oeccpsc22.md)
+- [2.4. ECOC'22 Demo (PENDING)](./2-4-ecoc22.md)
+- [2.5. NFV-SDN'22 Demo (PENDING)](./2-5-nfvsdn22.md)
diff --git a/tutorial/2-1-python-environment.md b/tutorial/2-1-python-environment.md
new file mode 100644
index 0000000000000000000000000000000000000000..4a818e9e7c0a2d4b4ef21ed48d04c84b339046fc
--- /dev/null
+++ b/tutorial/2-1-python-environment.md
@@ -0,0 +1,74 @@
+# 2.1. Configure Python Environment
+
+This section describes how to configure the Python environment to run experiments and develop code for the ETSI
+TeraFlowSDN controller.
+In particular, we use [PyEnv](https://github.com/pyenv/pyenv) to install the appropriate version of Python and manage
+the virtual environments.
+
+
+## 2.1.1. Upgrade the Ubuntu distribution
+Skip this step if you already did it during the creation of the VM.
+```bash
+sudo apt-get update -y
+sudo apt-get dist-upgrade -y
+```
+
+
+## 2.1.2. Install PyEnv dependencies in the VM
+```bash
+sudo apt-get install -y make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget \
+    curl llvm git libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev
+```
+
+
+## 2.1.3. Install PyEnv
+```bash
+curl https://pyenv.run | bash
+# When finished, edit ~/.bash_profile // ~/.profile // ~/.bashrc as the installer proposes.
+# In general, it means to append the following lines to ~/.bashrc:
+export PYENV_ROOT="$HOME/.pyenv"
+command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"
+eval "$(pyenv init -)"
+eval "$(pyenv virtualenv-init -)"
+```
+
+
+## 2.1.4. Restart the VM
+Restart the VM for all the changes to take effect.
+```bash
+sudo reboot
+```
+
+
+## 2.1.5. Install Python 3.9 over PyEnv
+```bash
+pyenv install 3.9.13
+    # This command might take some minutes depending on your Internet connection speed and the performance of your VM.
+```
+
+
+## 2.1.6. Create the Virtual Environment for TeraFlowSDN
+The following commands create a virtual environment named as `tfs` using Python v3.9.13 and associate that environment
+with the current folder, i.e., `~/tfs-ctrl`. That way, when you are in that folder, the associated virtual environment
+will be used, thus inheriting the Python interpreter, i.e., Python v3.9.13, and the Python packages installed on it.
+
+```bash
+cd ~/tfs-ctrl
+pyenv virtualenv 3.9.13 tfs
+pyenv local 3.9.13/envs/tfs
+```
+
+After completing these commands, you should see in your prompt that now you're within the virtual environment
+`3.9.13/envs/tfs` on folder `~/tfs-ctrl`:
+```
+(3.9.13/envs/tfs) tfs@tfs-vm:~/tfs-ctrl$
+```
+
+
+## 2.1.7. Install the basic Python packages within the virtual environment
+From within the `3.9.13/envs/tfs` environment on folder `~/tfs-ctrl`, run the following commands to install the basic
+Python packages required to work with TeraFlowSDN.
+```bash
+cd ~/tfs-ctrl
+./install_requirements.sh
+```
diff --git a/tutorial/2-2-ofc22.md b/tutorial/2-2-ofc22.md
new file mode 100644
index 0000000000000000000000000000000000000000..993401480e9bdc87f0f4f9e4067addf49387bb15
--- /dev/null
+++ b/tutorial/2-2-ofc22.md
@@ -0,0 +1,121 @@
+# 2.2. OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services
+
+This functional test reproduces the live demonstration "Demonstration of Zero-touch Device and L3-VPN Service Management
+Using the TeraFlow Cloud-native SDN Controller" carried out at
+[OFC'22](https://www.ofcconference.org/en-us/home/program-speakers/demo/).
+
+
+## 2.2.1. Functional test folder
+
+This functional test can be found in folder `./src/tests/ofc22/`. A convenience alias `./ofc22/` pointing to that folder
+has been defined.
+
+
+## 2.2.2. Execute with real devices
+
+This functional test is designed to operate both with real and emulated devices.
+By default, emulated devices are used; however, if you have access to real devices, you can create/modify the files
+`./ofc22/tests/Objects.py` and `./ofc22/tests/Credentials.py` to point to your devices, and map to your own network
+topology.
+Otherwise, you can modify the `./ofc22/tests/descriptors_emulated.json` that is designed to be uploaded through the
+WebUI instead of using the command line scripts.
+Note that the default scenario assumes devices R2 and R4 are always emulated, while devices R1, R3, and O1
+can be configured as emulated or real devices.
+
+__Important__: The device drivers operating with real devices, e.g., OpenConfigDriver, P4Driver, and TransportApiDriver,
+               have to be considered as experimental. The configuration and monitoring capabilities they support are
+               limited or partially implemented/tested. Use them with care.
+
+
+## 2.2.3. Deployment and Dependencies
+
+To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes environment and a TeraFlowSDN
+controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured
+the Python environment as described in
+[Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).
+Remember to source the scenario settings appropriately, e.g., `cd ~/tfs-ctrl && source my_deploy.sh` in each terminal
+you open.
+
+
+## 2.2.4. Access to the WebUI and Dashboard
+
+When the deployment completes, you can connect to the TeraFlowSDN WebUI and Dashboards as described in
+[Tutorial: Deployment Guide > 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md)
+
+Notes:
+- the default credentials for the Grafana Dashboard are user/pass: `admin`/`admin123+`.
+- in Grafana, you will find the "L3-Monitoring" in the "Starred dashboards" section.
+
+
+## 2.2.5. Test execution
+
+To execute this functional test, four main steps need to be carried out:
+1. Device bootstrapping
+2. L3VPN Service creation
+3. L3VPN Service removal
+4. Cleanup
+
+As the execution of each test progresses, a report will be generated indicating PASSED / FAILED / SKIPPED. If there
+is some error during the execution, you should see a detailed report on the error. See the troubleshooting section if
+needed.
+
+Feel free to check the logs of the different components using the appropriate `scripts/show_logs_[component].sh`
+scripts after you execute each step.
+
+
+### 2.2.5.1. Device bootstrapping
+
+This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The
+expected results are:
+- The devices to be added into the Topology.
+- The devices to be pre-configured and initialized as ENABLED by the Automation component.
+- The monitoring for the device ports (named as endpoints in TeraFlowSDN) to be activated and data collection to
+  automatically start.
+- The links to be added to the topology.
+
+To run this step, you can do it from the WebUI by uploading the file `./ofc22/tests/descriptors_emulated.json` that
+contains the descriptors of the contexts, topologies, devices, and links, or by executing the
+`./ofc22/run_test_01_bootstrap.sh` script.
+
+When the bootstrapping finishes, check in the Grafana L3-Monitoring Dashboard and you should see the monitoring data
+being plotted and updated every 5 seconds (by default). Given that there is no service configured, you should see a
+0-valued flat plot.
+
+In the WebUI, select the "admin" Context. Then, in the "Devices" tab you should see that 5 different emulated devices
+have been created and activated: 4 packet routers, and 1 optical line system controller. Besides, in the "Services" tab
+you should see that there is no service created. Note here that the emulated devices produce synthetic
+randomly-generated data and do not care about the services configured.
+
+
+### 2.2.5.2. L3VPN Service creation
+
+This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance.
+
+To run this step, execute the `./ofc22/run_test_02_create_service.sh` script.
+
+When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for
+the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration
+rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured,
+you should see the plots with the monitored data for the device. By default, device R1-EMU is selected.
+
+
+### 2.2.5.3. L3VPN Service removal
+
+This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock
+OSM instance.
+
+To run this step, execute the `./ofc22/run_test_03_delete_service.sh` script, or delete the L3NM service from the WebUI.
+
+When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed.
+Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the
+Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again.
+
+
+### 2.2.5.4. Cleanup
+
+This last step performs a cleanup of the scenario removing all the TeraFlowSDN entities for completeness.
+
+To run this step, execute the `./ofc22/run_test_04_cleanup.sh` script.
+
+When the script finishes, check the WebUI "Devices" tab, you should see that the devices have been removed. Besides, in
+the "Services" tab you can see that the "admin" Context has no services given that that context has been removed.
diff --git a/src/tests/oeccpsc22/README.md b/tutorial/2-3-oeccpsc22.md
similarity index 53%
rename from src/tests/oeccpsc22/README.md
rename to tutorial/2-3-oeccpsc22.md
index 42e0228a52bdf9dfc21bc0358b78fb98677ed458..2ea7261d8a032b6543b3f3e9ed2fa702d9066616 100644
--- a/src/tests/oeccpsc22/README.md
+++ b/tutorial/2-3-oeccpsc22.md
@@ -1,8 +1,8 @@
-# OECC/PSC'22 Paper - Interdomain slices
-This functional test reproduces the experiment in paper "... paper title ..." presented at OECC/PSC'22 conference
-[OECC/PSC'22](... demo link ...).
-
-## Functional test folder
-This functional test can be found in folder `./src/tests/oeccpsc22/`. A convenience alias `./oeccpsc22/` pointing to that folder has been defined.
-
-# TO BE WRITTEN
+# 2.3. OECC/PSC'22 Demo - Interdomain slices (WORK IN PROGRESS)
+
+This functional test reproduces the experiment in paper "... paper title ..." presented at OECC/PSC'22 conference
+[OECC/PSC'22](... demo link ...).
+
+## 2.3.1. Functional test folder
+This functional test can be found in folder `./src/tests/oeccpsc22/`. A convenience alias `./oeccpsc22/` pointing to
+that folder has been defined.
diff --git a/tutorial/2-4-ecoc22.md b/tutorial/2-4-ecoc22.md
new file mode 100644
index 0000000000000000000000000000000000000000..f752bda840a3eb2fbde6c907e4ce139de3f8ce82
--- /dev/null
+++ b/tutorial/2-4-ecoc22.md
@@ -0,0 +1 @@
+# 2.4. ECOC'22 Demo (PENDING)
diff --git a/tutorial/2-5-nfvsdn22.md b/tutorial/2-5-nfvsdn22.md
new file mode 100644
index 0000000000000000000000000000000000000000..35fae3af31420f401997377d9b10a47acc92d490
--- /dev/null
+++ b/tutorial/2-5-nfvsdn22.md
@@ -0,0 +1 @@
+# 2.5. NFV-SDN'22 Demo (PENDING)
diff --git a/tutorial/3-0-development.md b/tutorial/3-0-development.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e2d1a03f21bbabcfeab46422b52903d2b5a53a7
--- /dev/null
+++ b/tutorial/3-0-development.md
@@ -0,0 +1,9 @@
+# 3. Development Guide (WORK IN PROGRESS)
+
+This section walks you through the process of developing new components for the TeraFlowSDN controller. For convenience,
+this guide assumes you are using the Oracle VirtualBox-based VM running MicroK8s Kubernetes platform as described in the
+[Deployment Guide](./1-0-deployment.md). The guide includes the details on configuring VSCode, connecting it to the VM,
+and useful development commands, tricks, and hints.
+
+## Table of Content:
+- [3.1. Configure VSCode and Connect to the VM](./3-1-configure-vscode.md)
+- [3.2. Development Commands, Tricks, and Hints (WORK IN PROGRESS)](./3-2-develop-cth.md)
diff --git a/tutorial/3-1-configure-vscode.md b/tutorial/3-1-configure-vscode.md
new file mode 100644
index 0000000000000000000000000000000000000000..a2586142f1ca18897c25168c610fcecb0ea3ebcd
--- /dev/null
+++ b/tutorial/3-1-configure-vscode.md
@@ -0,0 +1,77 @@
+# 3.1. Configure VSCode and Connect to the VM
+
+
+## 3.1.1. Install VSCode and the required extensions
+If not already done, install [VSCode](https://code.visualstudio.com/) and the "Remote SSH" extension on your local
+machine, not in the VM.
+
+__Note__: "Python" extension is not required here. It will be installed later on the VSCode server running on the VM.
+
+
+## 3.1.2. Configure the "Remote SSH" extension
+- Go to left icon "Remote Explorer"
+- Click the "gear" icon next to "SSH TARGETS" on top of "Remote Explorer" bar
+- Choose to edit "<...>/.ssh/config" file (or equivalent)
+- Add the following entry (assuming previous port forwarding configuration):
+```
+Host TFS-VM
+    HostName 127.0.0.1
+    Port 2200
+    ForwardX11 no
+    User tfs
+```
+- Save the file
+- An entry "TFS-VM" should appear on "SSH TARGETS".
+
+
+## 3.1.3. Connect VSCode to the VM through "Remote SSH" extension
+- Right-click on "TFS-VM"
+- Select "Connect to Host in Current Window"
+- Reply to the questions asked
+  - Platform of the remote host "TFS-VM": Linux
+  - "TFS-VM" has fingerprint "<fingerprint>". Do you want to continue?: Continue
+  - Type tfs user's password: tfs123
+- You should now be connected to the TFS-VM.
+
+__Note__: if you get a connection error message, the reason might be due to wrong SSH server fingerprint. Edit file
+          "<...>/.ssh/known_hosts" on your local user account, check if there is a line starting with
+          "[127.0.0.1]:2200" (assuming previous port forwarding configuration), remove the entire line, save the file,
+          and retry connection.
+
+
+## 3.1.4. Add SSH key to prevent typing the password every time
+This step creates an SSH key in the VM and installs it on the VSCode to prevent having to type the password every time.
+
+- In VSCode (connected to the VM), click menu "Terminal > New Terminal"
+- Run the following commands on the VM's terminal through VSCode
+```bash
+ssh-keygen -t rsa -b 4096 -f ~/.ssh/tfs-vm.key
+  # leave password empty
+ssh-copy-id -i ~/.ssh/tfs-vm.key.pub tfs@10.0.2.10
+  # tfs@10.0.2.10's password: <type tfs user's password: tfs123>
+rm ~/.ssh/known_hosts
+```
+
+- In VSCode, click left "Explorer" panel to expand, if not expanded, and click "Open Folder" button.
+  - Choose "/home/tfs/"
+  - Type tfs user's password when asked
+  - Trust authors of the "/home/tfs [SSH: TFS-VM]" folder when asked
+- Right click on the file "tfs-vm.key" in the file explorer
+  - Select "Download..." option
+  - Download the file into your user account's ".ssh" folder
+- Delete files "tfs-vm.key" and "tfs-vm.key.pub" on the TFS-VM.
+
+- In VSCode, click left "Remote Explorer" panel to expand
+  - Click the "gear" icon next to "SSH TARGETS" on top of "Remote Explorer" bar
+  - Choose to edit "<...>/.ssh/config" file (or equivalent)
+  - Find entry "Host TFS-VM" and update it as follows:
+```
+Host TFS-VM
+    HostName 127.0.0.1
+    Port 2200
+    ForwardX11 no
+    User tfs
+    IdentityFile "<path to the downloaded identity private key file>"
+```
+  - Save the file
+- From now, VSCode will use the identity file to connect to the TFS-VM instead of the user's password.
diff --git a/tutorial/3-2-develop-cth.md b/tutorial/3-2-develop-cth.md
new file mode 100644
index 0000000000000000000000000000000000000000..983e862d7c5e0ba06583739f797062ff9b9b32fc
--- /dev/null
+++ b/tutorial/3-2-develop-cth.md
@@ -0,0 +1,18 @@
+# 3.2. Development Commands, Tricks, and Hints (WORK IN PROGRESS)
+
+
+
+## Items to be addressed:
+- pytest flags: --log-level=INFO --verbose -o log_cli=true -o log_file=my_log_file.log -o log_file_level=DEBUG
+- code coverage
+- nginx ingress grpc to enable access from external sources
+
+
+## Interesting Docker commands
+
+Build by hand:
+cd src
+docker build -t "context:lgr-test" -f ./context/Dockerfile .
+
+Run by hand:
+docker run --rm --name lgr-test -it --env "DB_BACKEND=inmemory" --entrypoint /bin/bash context:lgr-test
diff --git a/tutorial/README.md b/tutorial/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..836434e51b8797cf91a49a3f47298eec712fbe43
--- /dev/null
+++ b/tutorial/README.md
@@ -0,0 +1,39 @@
+# ETSI TeraFlowSDN Controller Tutorials
+
+
+## Abstract
+This document provides a walkthrough on how to prepare your environment for executing and contributing to the
+[ETSI TeraFlowSDN OSG](https://tfs.etsi.org/).
+
+This walkthrough makes some reasonable assumptions to simplify the deployment of the ETSI TeraFlowSDN controller, the
+execution of experiments and tests, and development of new contributions. In particular, we assume:
+
+- [VirtualBox](https://www.virtualbox.org/) version 6.1.34 r150636
+- [VSCode](https://code.visualstudio.com/) with the "Remote SSH" extension
+- VM software:
+  - [Ubuntu Server 20.04 LTS](https://releases.ubuntu.com/20.04/)
+  - [MicroK8s](https://microk8s.io/)
+
+
+## Contact
+If your environment does not fit with the proposed assumptions and you experience some trouble preparing it to work
+with the ETSI TeraFlowSDN controller, contact the ETSI TeraFlowSDN OSG team through
+[Slack](https://join.slack.com/t/teraflowsdn/shared_invite/zt-18gc5jvkh-1_DEZHFhxeuOqzJZPq~U~A)
+
+
+## Table of Contents:
+- [1. Deployment Guide](./1-0-deployment.md)
+  - [1.1. Create VM for the TeraFlowSDN controller](./1-1-create-vm.md)
+  - [1.2. Install MicroK8s Kubernetes platform](./1-2-install-microk8s.md)
+  - [1.3. Deploy TeraFlowSDN over MicroK8s](./1-3-deploy-tfs.md)
+  - [1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md)
+  - [1.5. Show Deployment and Log per Component](./1-5-deploy-logs-troubleshooting.md)
+- [2. Run Experiments Guide (WORK IN PROGRESS)](./2-0-run-experiments.md)
+  - [2.1. Configure the Python environment](./2-1-python-environment.md)
+  - [2.2. OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services](./2-2-ofc22.md)
+  - [2.3. OECC/PSC'22 Demo - Interdomain slices (WORK IN PROGRESS)](./2-3-oeccpsc22.md)
+  - [2.4. ECOC'22 Demo (PENDING)](./2-4-ecoc22.md)
+  - [2.5. NFV-SDN'22 Demo (PENDING)](./2-5-nfvsdn22.md)
+- [3. Development Guide (WORK IN PROGRESS)](./3-0-development.md)
+  - [3.1. Configure VSCode and Connect to the VM](./3-1-configure-vscode.md)
+  - [3.2. Development Commands, Tricks, and Hints (WORK IN PROGRESS)](./3-2-develop-cth.md)