diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 37d5fa2b54e2bd779a5ef64ebdd03cf763635a69..edc86711ef67083fd289c6e682399e3a2f364734 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -16,46 +16,48 @@
 stages:
   #- dependencies
   - build
+  - prepare
   - unit_test
   - end2end_test
 
 # include the individual .gitlab-ci.yml of each micro-service and tests
 include:
   #- local: '/manifests/.gitlab-ci.yml'
-  - local: '/src/monitoring/.gitlab-ci.yml'
-  - local: '/src/nbi/.gitlab-ci.yml'
-  - local: '/src/context/.gitlab-ci.yml'
-  - local: '/src/device/.gitlab-ci.yml'
-  - local: '/src/service/.gitlab-ci.yml'
-  - local: '/src/dbscanserving/.gitlab-ci.yml'
-  - local: '/src/opticalattackmitigator/.gitlab-ci.yml'
-  - local: '/src/opticalattackdetector/.gitlab-ci.yml'
-  - local: '/src/opticalattackmanager/.gitlab-ci.yml'
-  - local: '/src/opticalcontroller/.gitlab-ci.yml'
-  - local: '/src/ztp/.gitlab-ci.yml'
-  - local: '/src/policy/.gitlab-ci.yml'
-  - local: '/src/automation/.gitlab-ci.yml'
-  - local: '/src/forecaster/.gitlab-ci.yml'
+  #- local: '/src/monitoring/.gitlab-ci.yml'
+  #- local: '/src/nbi/.gitlab-ci.yml'
+  #- local: '/src/context/.gitlab-ci.yml'
+  #- local: '/src/device/.gitlab-ci.yml'
+  #- local: '/src/service/.gitlab-ci.yml'
+  #- local: '/src/qkd_app/.gitlab-ci.yml'
+  #- local: '/src/dbscanserving/.gitlab-ci.yml'
+  #- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
+  #- local: '/src/opticalattackdetector/.gitlab-ci.yml'
+  #- local: '/src/opticalattackmanager/.gitlab-ci.yml'
+  #- local: '/src/opticalcontroller/.gitlab-ci.yml'
+  #- local: '/src/ztp/.gitlab-ci.yml'
+  #- local: '/src/policy/.gitlab-ci.yml'
+  #- local: '/src/automation/.gitlab-ci.yml'
+  #- local: '/src/forecaster/.gitlab-ci.yml'
   #- local: '/src/webui/.gitlab-ci.yml'
   #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
   #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
   #- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
-  - local: '/src/slice/.gitlab-ci.yml'
+  #- local: '/src/slice/.gitlab-ci.yml'
   #- local: '/src/interdomain/.gitlab-ci.yml'
-  - local: '/src/pathcomp/.gitlab-ci.yml'
+  #- local: '/src/pathcomp/.gitlab-ci.yml'
   #- local: '/src/dlt/.gitlab-ci.yml'
-  - local: '/src/load_generator/.gitlab-ci.yml'
-  - local: '/src/bgpls_speaker/.gitlab-ci.yml'
-  - local: '/src/kpi_manager/.gitlab-ci.yml'
-  - local: '/src/kpi_value_api/.gitlab-ci.yml'
+  #- local: '/src/load_generator/.gitlab-ci.yml'
+  #- local: '/src/bgpls_speaker/.gitlab-ci.yml'
+  #- local: '/src/kpi_manager/.gitlab-ci.yml'
+  #- local: '/src/kpi_value_api/.gitlab-ci.yml'
   #- local: '/src/kpi_value_writer/.gitlab-ci.yml'
   #- local: '/src/telemetry/.gitlab-ci.yml'
-  - local: '/src/analytics/.gitlab-ci.yml'
-  - local: '/src/qos_profile/.gitlab-ci.yml'
-  - local: '/src/vnt_manager/.gitlab-ci.yml'
-  - local: '/src/e2e_orchestrator/.gitlab-ci.yml'
-  - local: '/src/ztp_server/.gitlab-ci.yml'
-  - local: '/src/osm_client/.gitlab-ci.yml'
+  #- local: '/src/analytics/.gitlab-ci.yml'
+  #- local: '/src/qos_profile/.gitlab-ci.yml'
+  #- local: '/src/vnt_manager/.gitlab-ci.yml'
+  #- local: '/src/e2e_orchestrator/.gitlab-ci.yml'
+  #- local: '/src/ztp_server/.gitlab-ci.yml'
+  #- local: '/src/osm_client/.gitlab-ci.yml'
 
   # This should be last one: end-to-end integration tests
   - local: '/src/tests/.gitlab-ci.yml'
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index 5592864d68485086b760ad3bee06353847ca4c56..2bf8b5d863b055f8efdd500f23722d7b619ae744 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -40,7 +40,7 @@ spec:
             - name: MB_BACKEND
               value: "nats"
             - name: LOG_LEVEL
-              value: "INFO"
+              value: "DEBUG"
             - name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY
               value: "FALSE"
             - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index a366a50414e28c073cf51b860ce84a1abb0293e8..7c3ded7c00011bc937b0b3d2df9e3935f9f43b10 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -39,7 +39,7 @@ spec:
             - containerPort: 9192
           env:
             - name: LOG_LEVEL
-              value: "INFO"
+              value: "DEBUG"
           startupProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:2020"]
diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml
index cead19406afd01718f8f3d105fabd3cc4754356b..6a07478c0f229cc9ad85cc1ad68f5f69a77eaacf 100644
--- a/manifests/nbiservice.yaml
+++ b/manifests/nbiservice.yaml
@@ -39,9 +39,9 @@ spec:
             #- containerPort: 9192
           env:
             - name: LOG_LEVEL
-              value: "INFO"
+              value: "DEBUG"
             - name: FLASK_ENV
-              value: "production"  # change to "development" if developing
+              value: "development"  # change back to "production" for production deployments
             - name: IETF_NETWORK_RENDERER
               value: "LIBYANG"
           envFrom:
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index 2db0d41b0da09ab768c7b7c3e48350e314974369..71c7e4cd70a4003f5c67781a304d7386505a983a 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -36,7 +36,7 @@ spec:
             - containerPort: 9192
           env:
             - name: LOG_LEVEL
-              value: "INFO"
+              value: "DEBUG"
             - name: ENABLE_FORECASTER
               value: "NO"
           readinessProbe:
diff --git a/manifests/qkd_appservice.yaml b/manifests/qkd_appservice.yaml
index 79cfaf6bd09d13d1c3f1202a7fdcc5b7e71e4470..7641bd3aa86d43896aac9946b61f866dd295aef5 100644
--- a/manifests/qkd_appservice.yaml
+++ b/manifests/qkd_appservice.yaml
@@ -36,7 +36,7 @@ spec:
             - containerPort: 9192
           env:
             - name: LOG_LEVEL
-              value: "INFO"
+              value: "DEBUG"
             - name: CRDB_DATABASE
               value: "tfs_qkd_app"
           envFrom:
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 8262550efc31ff5c6d8d660cab7206c31c1bc86e..8615e8879a6c0b6b10b1d17f76287710443e674c 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -36,7 +36,7 @@ spec:
             - containerPort: 9192
           env:
             - name: LOG_LEVEL
-              value: "INFO"
+              value: "DEBUG"
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:3030"]
diff --git a/my_deploy.sh b/my_deploy.sh
index 4d3820f41affacdb5aea743e3f4cedc310442a05..969caa94fd930da516a2d6540f4520cb48e9a96a 100644
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -80,11 +80,11 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui"
 #   To manage QKD Apps, "service" requires "qkd_app" to be deployed
 #   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
 #   "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
-#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
-#    BEFORE="${TFS_COMPONENTS% service*}"
-#    AFTER="${TFS_COMPONENTS#* service}"
-#    export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
-#fi
+if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+    BEFORE="${TFS_COMPONENTS% service*}"
+    AFTER="${TFS_COMPONENTS#* service}"
+    export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
+fi
 
 # Uncomment to activate Load Generator
 #export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator"
@@ -137,7 +137,7 @@ export CRDB_DEPLOY_MODE="single"
 export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
-export CRDB_REDEPLOY=""
+export CRDB_REDEPLOY="YES"
 
 
 # ----- NATS -------------------------------------------------------------------
diff --git a/src/device/.gitlab-ci.yml b/src/device/.gitlab-ci.yml
index 6be3b5bdf8149df50d8d4d165d8b8277f259fb48..f1355fd4574afb89432bef778154d8b186fdaaef 100644
--- a/src/device/.gitlab-ci.yml
+++ b/src/device/.gitlab-ci.yml
@@ -12,11 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+stages:
+  - build
+  - prepare
+  - unit_test
+
 # Build, tag, and push the Docker image to the GitLab Docker registry
-build device:
+build_device:
   variables:
-    IMAGE_NAME: 'device' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+    IMAGE_NAME: 'device'
+    IMAGE_TAG: 'latest'
   stage: build
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
@@ -32,113 +37,136 @@ build device:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
     - changes:
-      - src/common/**/*.py
-      - proto/*.proto
-      - src/$IMAGE_NAME/**/*.{py,in,yml}
-      - src/$IMAGE_NAME/Dockerfile
-      - src/$IMAGE_NAME/tests/*.py
-      - manifests/${IMAGE_NAME}service.yaml
-      - .gitlab-ci.yml
+        - src/common/**/*.py
+        - proto/*.proto
+        - src/$IMAGE_NAME/**/*.{py,in,yml}
+        - src/$IMAGE_NAME/Dockerfile
+        - src/$IMAGE_NAME/tests/*.py
+        - manifests/${IMAGE_NAME}service.yaml
+        - .gitlab-ci.yml
 
-## Start Mock QKD Nodes before unit testing
-#start_mock_nodes:
-#  stage: deploy
-#  script:
-#    - bash src/tests/tools/mock_qkd_nodes/start.sh &
-#    - sleep 10 # wait for nodes to spin up
-#  artifacts:
-#    paths:
-#      - mock_nodes.log
-#  rules:
-#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+# Deploy mock QKD nodes
+prepare_mock_qkd_nodes:
+  stage: prepare
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge network is already created"; else docker network create --driver=bridge teraflowbridge; fi
+    - |
+      # Context-related cleanup
+      if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+      if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+      if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
 
-## Prepare Scenario (Start NBI, mock services)
-#prepare_scenario:
-#  stage: deploy
-#  script:
-#    - pytest src/tests/qkd/unit/PrepareScenario.py
-#  needs:
-#    - start_mock_nodes
-#  rules:
-#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+  script:
+    - docker volume create crdb
+    - docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:latest-v22.2 start-single-node
+    - docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 nats:2.9 --http_port 8222 --user tfs --pass tfs123
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user "tfs"'; do sleep 1; done
+    - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
+    - MOCK_NODES_DIR="$CI_PROJECT_DIR/src/tests/tools/mock_qkd_nodes"
+    - |
+      if [ -d "$MOCK_NODES_DIR" ]; then
+        cd "$MOCK_NODES_DIR" && ./start.sh &
+        MOCK_NODES_PID=$!
+      else
+        echo "Error: Mock QKD nodes directory '$MOCK_NODES_DIR' not found." && exit 1;
+      fi
+    - echo "Waiting for mock nodes to be up..."
+    - RETRY_COUNT=0
+    - MAX_RETRIES=15
+    - |
+      while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
+        if curl -s http://127.0.0.1:11111 > /dev/null && \
+           curl -s http://127.0.0.1:22222 > /dev/null && \
+           curl -s http://127.0.0.1:33333 > /dev/null; then
+            echo "Mock nodes are up!"
+            break
+        else
+            echo "Mock nodes not ready, retrying in 5 seconds..."
+            RETRY_COUNT=$((RETRY_COUNT + 1))
+            sleep 5
+        fi
+      done
+    - |
+      if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then
+        echo "Error: Mock nodes failed to start after multiple attempts."
+        exit 1
+      fi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+        - src/common/**/*.py
+        - proto/*.proto
+        - src/device/**/*.{py,in,yml}
+        - src/device/Dockerfile
+        - src/device/tests/*.py
+        - src/tests/tools/mock_qkd_nodes/**
+        - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit_test device:
+unit_test_device:
   variables:
-    IMAGE_NAME: 'device' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+    IMAGE_NAME: 'device'
+    IMAGE_TAG: 'latest'
   stage: unit_test
   needs:
-    - build device
-    #- start_mock_nodes
-    #- prepare_scenario
+    - build_device
+    - prepare_mock_qkd_nodes
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
-    - >
-      if docker network list | grep teraflowbridge; then
-        echo "teraflowbridge is already created";
-      else
-        docker network create -d bridge teraflowbridge;
-      fi
-    - >
-      if docker container ls | grep $IMAGE_NAME; then
-        docker rm -f $IMAGE_NAME;
-      else
-        echo "$IMAGE_NAME image is not in the system";
-      fi
   script:
     - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
-    - docker run --name $IMAGE_NAME -d -p 2020:2020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker run --name $IMAGE_NAME --network=teraflowbridge -d -p 2020:2020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" -e PYTHONPATH="/var/teraflow:/var/teraflow/device:/var/teraflow/tests/tools/mock_qkd_nodes:/var/teraflow/tests" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
     - sleep 5
     - docker ps -a
     - docker logs $IMAGE_NAME
-    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_emulated.py --junitxml=/opt/results/${IMAGE_NAME}_report_emulated.xml"
-    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_ietf_actn.py --junitxml=/opt/results/${IMAGE_NAME}_report_ietf_actn.xml"
-    #- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/qkd/unit/test_*.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_qkd_mock_connectivity.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_qkd_compliance.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_mock_qkd_node.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_qkd_error_handling.py"
     - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
     - docker rm -f $IMAGE_NAME
+    - docker rm -f pathcomp-frontend pathcomp-backend device context crdb nats
+    - docker volume rm -f crdb
     - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
     - changes:
-      - src/common/**/*.py
-      - proto/*.proto
-      - src/$IMAGE_NAME/**/*.{py,in,yml}
-      - src/$IMAGE_NAME/Dockerfile
-      - src/$IMAGE_NAME/tests/*.py
-      - src/$IMAGE_NAME/tests/Dockerfile
-      - manifests/${IMAGE_NAME}service.yaml
-      - .gitlab-ci.yml
+        - src/common/**/*.py
+        - proto/*.proto
+        - src/$IMAGE_NAME/**/*.{py,in,yml}
+        - src/$IMAGE_NAME/Dockerfile
+        - src/$IMAGE_NAME/tests/*.py
+        - src/$IMAGE_NAME/tests/Dockerfile
+        - src/tests/tools/mock_qkd_nodes/**
+        - manifests/${IMAGE_NAME}service.yaml
+        - .gitlab-ci.yml
   artifacts:
-      when: always
-      reports:
-        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml
+    when: always
+    reports:
+      junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml
 
 ## Deployment of the service in Kubernetes Cluster
-#deploy device:
+#deploy_device:
 #  variables:
 #    IMAGE_NAME: 'device' # name of the microservice
 #    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
 #  stage: deploy
 #  needs:
-#    - unit test device
-#    # - integ_test execute
+#    - unit_test_device
 #  script:
 #    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
 #    - kubectl version
 #    - kubectl get all
 #    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
 #    - kubectl get all
-#  # environment:
-#  #   name: test
-#  #   url: https://example.com
-#  #   kubernetes:
-#  #     namespace: test
 #  rules:
 #    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
 #      when: manual    
diff --git a/src/device/tests/qkd/unit/test_mock_qkd_node.py b/src/device/tests/qkd/unit/test_mock_qkd_node.py
index 2ec060e873b311a38d1c0b91ae533d794bbddb69..2eef0f6e440c7bd473d7cc80a8e79427a598102a 100644
--- a/src/device/tests/qkd/unit/test_mock_qkd_node.py
+++ b/src/device/tests/qkd/unit/test_mock_qkd_node.py
@@ -16,16 +16,20 @@ import pytest
 import requests
 from requests.exceptions import ConnectionError
 
+MOCK_QKD_ADDRESS = '127.0.0.1'
+MOCK_PORT = 11111
+
+
 def test_mock_qkd_node_responses():
-    response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
+    response = requests.get(f'http://{MOCK_QKD_ADDRESS}:{MOCK_PORT}/restconf/data/etsi-qkd-sdn-node:qkd_node')
     assert response.status_code == 200
     data = response.json()
     assert 'qkd_node' in data
 
 def test_mock_node_failure_scenarios():
     try:
-        response = requests.get('http://127.0.0.1:12345/restconf/data/etsi-qkd-sdn-node:qkd_node')
+        response = requests.get(f'http://{MOCK_QKD_ADDRESS}:12345/restconf/data/etsi-qkd-sdn-node:qkd_node')
     except ConnectionError as e:
         assert isinstance(e, ConnectionError)
     else:
-        pytest.fail("ConnectionError not raised as expected")
+        pytest.fail("ConnectionError not raised as expected")
diff --git a/src/device/tests/qkd/unit/test_qkd_compliance.py b/src/device/tests/qkd/unit/test_qkd_compliance.py
index e0cfe90cdfab2a060cd4b5ae968583af1da750de..4dec4ab6a35a8618947355e29e69c5cd780728e2 100644
--- a/src/device/tests/qkd/unit/test_qkd_compliance.py
+++ b/src/device/tests/qkd/unit/test_qkd_compliance.py
@@ -15,10 +15,18 @@
 
 import pytest
 import requests
-from tests.tools.mock_qkd_nodes.YangValidator import YangValidator
+from requests.exceptions import HTTPError
+from tests.tools.mock_qkd_nodes.YangValidator import YangValidator
 
 def test_compliance_with_yang_models():
     validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
-    response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
-    data = response.json()
-    assert validator.parse_to_dict(data) is not None
+    try:
+        response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
+        response.raise_for_status()
+        data = response.json()
+        assert validator.parse_to_dict(data) is not None, "Data validation failed against YANG model."
+    except HTTPError as e:
+        pytest.fail(f"HTTP error occurred: {e}")
+    except Exception as e:
+        pytest.fail(f"Unexpected error occurred: {e}")
+
diff --git a/src/device/tests/qkd/unit/test_qkd_error_hanling.py b/src/device/tests/qkd/unit/test_qkd_error_handling.py
similarity index 96%
rename from src/device/tests/qkd/unit/test_qkd_error_hanling.py
rename to src/device/tests/qkd/unit/test_qkd_error_handling.py
index 5d847ac381d34dd9457591cd416da5040fe8b115..4d674f109c0cd4fdf148f1297b89f6511a6e87d4 100644
--- a/src/device/tests/qkd/unit/test_qkd_error_hanling.py
+++ b/src/device/tests/qkd/unit/test_qkd_error_handling.py
@@ -40,7 +40,7 @@ def test_invalid_operations_on_network_links(qkd_driver):
 
     try:
         # Attempt to perform an invalid operation (simulate wrong resource key)
-        response = requests.post(f'http://{qkd_driver.address}/invalid_resource', json=invalid_payload)
+        response = requests.post(f'http://{qkd_driver.address}:{qkd_driver.port}/invalid_resource', json=invalid_payload)
         response.raise_for_status()
 
     except HTTPError as e:
diff --git a/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
index 05b589e3882f08e22959fc57383f0a57619cb32b..a2d59fcb3e14a608810d1b4770f2525a8ab41acc 100644
--- a/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
+++ b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
@@ -12,16 +12,35 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest, requests
+import pytest
+import requests
+import time
+import socket
 from unittest.mock import patch
-from device.service.drivers.qkd.QKDDriver import QKDDriver
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver
 
-MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_QKD_ADDRESS = '127.0.0.1'  # Use localhost to connect to the mock node in the Docker container
 MOCK_PORT = 11111
 
+@pytest.fixture(scope="module")
+def wait_for_mock_node():
+    """
+    Fixture to wait for the mock QKD node to be ready before running tests.
+    """
+    timeout = 30  # seconds
+    start_time = time.time()
+    while True:
+        try:
+            with socket.create_connection((MOCK_QKD_ADDRESS, MOCK_PORT), timeout=1):
+                break  # Success
+        except (socket.timeout, socket.error):
+            if time.time() - start_time > timeout:
+                raise RuntimeError("Timed out waiting for mock QKD node to be ready.")
+            time.sleep(1)
+
 @pytest.fixture
-def qkd_driver():
-    return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass')
+def qkd_driver(wait_for_mock_node):
+    return QKDDriver(address=MOCK_QKD_ADDRESS, port=MOCK_PORT, username='user', password='pass')
 
 # Deliverable Test ID: SBI_Test_01
 def test_qkd_driver_connection(qkd_driver):
@@ -29,7 +48,7 @@ def test_qkd_driver_connection(qkd_driver):
 
 # Deliverable Test ID: SBI_Test_01
 def test_qkd_driver_invalid_connection():
-    qkd_driver = QKDDriver(address='127.0.0.1', port=12345, username='user', password='pass')  # Use invalid port directly
+    qkd_driver = QKDDriver(address=MOCK_QKD_ADDRESS, port=12345, username='user', password='pass')  # Use invalid port directly
     assert qkd_driver.Connect() is False
 
 # Deliverable Test ID: SBI_Test_10
@@ -38,4 +57,3 @@ def test_qkd_driver_timeout_connection(mock_get, qkd_driver):
     mock_get.side_effect = requests.exceptions.Timeout
     qkd_driver.timeout = 0.001  # Simulate very short timeout
     assert qkd_driver.Connect() is False
-
diff --git a/src/nbi/service/qkd_app/Resources.py b/src/nbi/service/qkd_app/Resources.py
index fb4ec45d4700260fd3211a332f74954100aaf4c9..03a3d2fd21aaec309131947debb394af8d2bc694 100644
--- a/src/nbi/service/qkd_app/Resources.py
+++ b/src/nbi/service/qkd_app/Resources.py
@@ -12,16 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import uuid
-import json
+import json, logging
 from flask import request
+from flask.json import jsonify
 from flask_restful import Resource
 from common.proto.context_pb2 import Empty
 from common.proto.qkd_app_pb2 import App, QKDAppTypesEnum
 from common.Constants import DEFAULT_CONTEXT_NAME
 from context.client.ContextClient import ContextClient
+from nbi.service._tools.HttpStatusCodes import HTTP_OK, HTTP_SERVERERROR
 from qkd_app.client.QKDAppClient import QKDAppClient
 
+LOGGER = logging.getLogger(__name__)
+
 class _Resource(Resource):
     def __init__(self) -> None:
         super().__init__()
@@ -30,7 +33,7 @@ class _Resource(Resource):
 
 class Index(_Resource):
     def get(self):
-        return {'hello': 'world'}
+        return {}
 
 class ListDevices(_Resource):
     def get(self):
@@ -81,18 +84,24 @@ class CreateQKDApp(_Resource):
         devices = self.context_client.ListDevices(Empty()).devices
         local_device = None
 
+        local_qkdn_id = app['local_qkdn_id']
+
         # This for-loop won't be necessary if Device ID is guaranteed to be the same as QKDN Id
         for device in devices:
             for config_rule in device.device_config.config_rules:
                 if config_rule.custom.resource_key == '__node__':
                     value = json.loads(config_rule.custom.resource_value)
-                    qkdn_id = value['qkdn_id']
-                    if app['local_qkdn_id'] == qkdn_id:
-                        local_device = device
+                    if local_qkdn_id != value['qkdn_id']: continue
+                    local_device = device
                     break
 
         if local_device is None:
-            return {"status": "fail"}
+            MSG = 'Unable to find local_device for local_qkdn_id({:s})'
+            msg = MSG.format(str(local_qkdn_id))
+            LOGGER.error(msg)
+            response = jsonify({'error': msg})
+            response.status_code = HTTP_SERVERERROR
+            return response
 
         external_app_src_dst = {
             'app_id': {'context_id': {'context_uuid': {'uuid': DEFAULT_CONTEXT_NAME}}, 'app_uuid': {'uuid': ''}},
@@ -107,5 +116,6 @@ class CreateQKDApp(_Resource):
 
         self.qkd_app_client.RegisterApp(App(**external_app_src_dst))
 
-        return {"status": "success"}
-
+        response = jsonify({'status': 'success'})
+        response.status_code = HTTP_OK
+        return response
diff --git a/src/qkd_app/.gitlab-ci.yml b/src/qkd_app/.gitlab-ci.yml
index 5bba29ca5a31f0f32b70f9bf2be996cf05cd1b4e..573065b9e53980bcf794893a2ddfb937d03e3e5e 100644
--- a/src/qkd_app/.gitlab-ci.yml
+++ b/src/qkd_app/.gitlab-ci.yml
@@ -58,6 +58,50 @@ unit_test app:
     - docker logs $IMAGE_NAME
     - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
     - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+
+    # Mock QKD Nodes Deployment
+    - |
+      echo "Starting stage: deploy_mock_nodes"
+    - pip install flask  # Install Flask to ensure it is available
+    - |
+      for port in 11111 22222 33333; do
+        if lsof -i:$port >/dev/null 2>&1; then
+          echo "Freeing up port $port..."
+          fuser -k $port/tcp
+        fi
+      done
+      MOCK_NODES_DIR="$PWD/src/tests/tools/mock_qkd_nodes"
+      if [ -d "$MOCK_NODES_DIR" ]; then
+        cd "$MOCK_NODES_DIR" || exit
+        ./start.sh &
+        MOCK_NODES_PID=$!
+      else
+        echo "Error: Mock QKD nodes directory '$MOCK_NODES_DIR' not found."
+        exit 1
+      fi
+      echo "Waiting for mock nodes to be up..."
+      RETRY_COUNT=0
+      MAX_RETRIES=15
+      while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
+        if curl -s http://127.0.0.1:11111 > /dev/null && \
+           curl -s http://127.0.0.1:22222 > /dev/null && \
+           curl -s http://127.0.0.1:33333 > /dev/null; then
+            echo "Mock nodes are up!"
+            break
+        else
+            echo "Mock nodes not ready, retrying in 5 seconds..."
+            RETRY_COUNT=$((RETRY_COUNT + 1))
+            sleep 5
+        fi
+      done
+      if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then
+        echo "Error: Mock nodes failed to start after multiple attempts."
+        exit 1
+      fi
+
+  # Run additional QKD unit tests
+    - docker exec -i $IMAGE_NAME bash -c "pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_create_apps.py"
+    - docker exec -i $IMAGE_NAME bash -c "pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_set_new_configuration.py"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
     - docker rm -f $IMAGE_NAME
diff --git a/src/device/tests/qkd/unit/test_create_apps.py b/src/qkd_app/tests/test_create_apps.py
similarity index 98%
rename from src/device/tests/qkd/unit/test_create_apps.py
rename to src/qkd_app/tests/test_create_apps.py
index 557f8c23a8f976ea14d88834a2cd4e2f50cc1035..82394e862889c358601ff6f6e853ac43437c1ae5 100644
--- a/src/device/tests/qkd/unit/test_create_apps.py
+++ b/src/qkd_app/tests/test_create_apps.py
@@ -14,7 +14,7 @@
 
 import requests
 
-QKD_ADDRESS = '10.0.2.10'
+QKD_ADDRESS = '127.0.0.1'
 QKD_URL     = 'http://{:s}/qkd_app/create_qkd_app'.format(QKD_ADDRESS)
 
 QKD_REQUEST_1 = {
diff --git a/src/device/tests/qkd/unit/test_set_new_configuration.py b/src/qkd_app/tests/test_set_new_configuration.py
similarity index 98%
rename from src/device/tests/qkd/unit/test_set_new_configuration.py
rename to src/qkd_app/tests/test_set_new_configuration.py
index 3515f458494ac1b5616fc0ff7b12f3031c0aea53..1b5dfa2ba441b6f0d60d127d7f7acf6170d6d2da 100644
--- a/src/device/tests/qkd/unit/test_set_new_configuration.py
+++ b/src/qkd_app/tests/test_set_new_configuration.py
@@ -53,7 +53,7 @@ def create_qkd_app(driver, qkdn_id, backing_qkdl_id, client_app_id=None):
         print(f"Sending payload to {driver.address}: {app_payload}")
 
         # Send POST request to create the application
-        response = requests.post(f'http://{driver.address}/app/create_qkd_app', json=app_payload)
+        response = requests.post(f'http://{driver.address}/qkd_app/create_qkd_app', json=app_payload)
         
         # Check if the request was successful (HTTP 2xx)
         response.raise_for_status()
diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml
index b8ca2c14377e88170e2628843b17aab388362e86..37c5bb0dc86edc2760a8e1b5fdf4a00f48167d50 100644
--- a/src/service/.gitlab-ci.yml
+++ b/src/service/.gitlab-ci.yml
@@ -49,22 +49,22 @@ unit_test service:
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
     - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
+    - |
+      # Context-related cleanup
+      if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+      if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+      if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
 
-    # Context-related
-    - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
-    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
-    - if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
+      # Device-related cleanup
+      if docker container ls | grep context; then docker rm -f context; else echo "context image is not in the system"; fi
+      if docker container ls | grep device; then docker rm -f device; else echo "device image is not in the system"; fi
 
-    # Device-related
-    - if docker container ls | grep context; then docker rm -f context; else echo "context image is not in the system"; fi
-    - if docker container ls | grep device; then docker rm -f device; else echo "device image is not in the system"; fi
+      # Pathcomp-related cleanup
+      if docker container ls | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend image is not in the system"; fi
+      if docker container ls | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend image is not in the system"; fi
 
-    # Pathcomp-related
-    - if docker container ls | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend image is not in the system"; fi
-    - if docker container ls | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend image is not in the system"; fi
-
-    # Service-related
-    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
+      # Service-related cleanup
+      if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
 
   script:
     - docker pull "cockroachdb/cockroach:latest-v22.2"
@@ -76,87 +76,128 @@ unit_test service:
     - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
 
     # Context preparation
-    - docker volume create crdb
-    - >
-      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
-      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
-      --volume "crdb:/cockroach/cockroach-data"
+    - |
+      docker volume create crdb
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 \
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 \
+      --volume "crdb:/cockroach/cockroach-data" \
       cockroachdb/cockroach:latest-v22.2 start-single-node
-    - >
-      docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222
+      docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 \
       nats:2.9 --http_port 8222 --user tfs --pass tfs123
-    - echo "Waiting for initialization..."
-    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
-    - docker logs crdb
-    - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
-    - docker logs nats
-    - docker ps -a
-    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
-    - echo $CRDB_ADDRESS
-    - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
-    - echo $NATS_ADDRESS
-    - >
-      docker run --name context -d -p 1010:1010
-      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
-      --env "MB_BACKEND=nats"
-      --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222"
-      --network=teraflowbridge
+      echo "Waiting for initialization..."
+      while ! docker logs crdb 2>&1 | grep -q 'finished creating default user "tfs"'; do sleep 1; done
+      docker logs crdb
+      while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
+      docker logs nats
+      docker ps -a
+      CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+      echo $CRDB_ADDRESS
+      NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+      echo $NATS_ADDRESS
+
+    # Context Service Preparation
+    - |
+      docker run --name context -d -p 1010:1010 \
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" \
+      --env "MB_BACKEND=nats" \
+      --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222" \
+      --network=teraflowbridge \
       $CI_REGISTRY_IMAGE/context:$IMAGE_TAG
-    - CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
-    - echo $CONTEXTSERVICE_SERVICE_HOST
+      CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+      echo $CONTEXTSERVICE_SERVICE_HOST
 
     # Device preparation
-    - >
-      docker run --name device -d -p 2020:2020
-      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
-      --network=teraflowbridge
+    - |
+      docker run --name device -d -p 2020:2020 \
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" \
+      --network=teraflowbridge \
       $CI_REGISTRY_IMAGE/device:$IMAGE_TAG
-    - DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
-    - echo $DEVICESERVICE_SERVICE_HOST
+      DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+      echo $DEVICESERVICE_SERVICE_HOST
 
     # PathComp preparation
-    - >
-      docker run --name pathcomp-backend -d -p 8081:8081
-      --network=teraflowbridge
+    - |
+      docker run --name pathcomp-backend -d -p 8081:8081 \
+      --network=teraflowbridge \
       $CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG
-    - PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
-    - echo $PATHCOMP_BACKEND_HOST
-    - sleep 1
-    - >
-      docker run --name pathcomp-frontend -d -p 10020:10020
-      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
-      --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}"
-      --env "PATHCOMP_BACKEND_PORT=8081"
-      --network=teraflowbridge
+      PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+      echo $PATHCOMP_BACKEND_HOST
+      sleep 1
+      docker run --name pathcomp-frontend -d -p 10020:10020 \
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" \
+      --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}" \
+      --env "PATHCOMP_BACKEND_PORT=8081" \
+      --network=teraflowbridge \
       $CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG
-    - sleep 1
-    - PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
-    - echo $PATHCOMPSERVICE_SERVICE_HOST
+      sleep 1
+      PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+      echo $PATHCOMPSERVICE_SERVICE_HOST
 
     # Service preparation
-    - >
-      docker run --name $IMAGE_NAME -d -p 3030:3030
-      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
-      --env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}"
-      --env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}"
-      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
-      --network=teraflowbridge
+    - |
+      docker run --name $IMAGE_NAME -d -p 3030:3030 \
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" \
+      --env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}" \
+      --env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}" \
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" \
+      --network=teraflowbridge \
       $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+      sleep 5
+      docker ps -a
+      docker logs context
+      docker logs device
+      docker logs pathcomp-frontend
+      docker logs pathcomp-backend
+      docker logs $IMAGE_NAME
 
-    # Check status before the tests
-    - sleep 5
-    - docker ps -a
-    - docker logs context
-    - docker logs device
-    - docker logs pathcomp-frontend
-    - docker logs pathcomp-backend
-    - docker logs $IMAGE_NAME
+    # Mock QKD Nodes Deployment
+    - |
+      echo "Starting stage: deploy_mock_nodes"
+    - pip install flask  # Install Flask to ensure it is available
+    - |
+      for port in 11111 22222 33333; do
+        if lsof -i:$port >/dev/null 2>&1; then
+          echo "Freeing up port $port..."
+          fuser -k $port/tcp
+        fi
+      done
+      MOCK_NODES_DIR="$PWD/src/tests/tools/mock_qkd_nodes"
+      if [ -d "$MOCK_NODES_DIR" ]; then
+        cd "$MOCK_NODES_DIR" || exit
+        ./start.sh &
+        MOCK_NODES_PID=$!
+      else
+        echo "Error: Mock QKD nodes directory '$MOCK_NODES_DIR' not found."
+        exit 1
+      fi
+      echo "Waiting for mock nodes to be up..."
+      RETRY_COUNT=0
+      MAX_RETRIES=15
+      while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
+        if curl -s http://127.0.0.1:11111 > /dev/null && \
+           curl -s http://127.0.0.1:22222 > /dev/null && \
+           curl -s http://127.0.0.1:33333 > /dev/null; then
+            echo "Mock nodes are up!"
+            break
+        else
+            echo "Mock nodes not ready, retrying in 5 seconds..."
+            RETRY_COUNT=$((RETRY_COUNT + 1))
+            sleep 5
+        fi
+      done
+      if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then
+        echo "Error: Mock nodes failed to start after multiple attempts."
+        exit 1
+      fi
 
     # Run the tests
-    - >
-      docker exec -i $IMAGE_NAME bash -c
+    - |
+      docker exec -i $IMAGE_NAME bash -c \
       "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
-    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+      docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+
+    # Run QKD Bootstrap Test
+    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose service/tests/qkd/test_functional_bootstrap.py"
 
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
diff --git a/src/service/Dockerfile b/src/service/Dockerfile
index 49efe9829d63f1923b3bb271c66a05b02bd8e499..6be7139ba47aff1558e8a54a834218b191f492d7 100644
--- a/src/service/Dockerfile
+++ b/src/service/Dockerfile
@@ -32,6 +32,10 @@ RUN python3 -m pip install --upgrade pip
 RUN python3 -m pip install --upgrade setuptools wheel
 RUN python3 -m pip install --upgrade pip-tools
 
+# Install Flask globally
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install flask
+
 # Get common Python packages
 # Note: this step enables sharing the previous Docker build steps among all the Python components
 WORKDIR /var/teraflow
diff --git a/src/device/tests/qkd/unit/descriptorQKD_links.json b/src/service/tests/qkd/descriptorQKD_links.json
similarity index 68%
rename from src/device/tests/qkd/unit/descriptorQKD_links.json
rename to src/service/tests/qkd/descriptorQKD_links.json
index 28a9e7d5ae014f78cfa0e554ee73a53449bba03c..d80864cb0bfd8ee1fed11a6af482f50620953894 100644
--- a/src/device/tests/qkd/unit/descriptorQKD_links.json
+++ b/src/service/tests/qkd/descriptorQKD_links.json
@@ -10,68 +10,64 @@
             "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node",
             "device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
             "device_config": {"config_rules": [
-                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "<YOUR_MACHINE_IP>"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "11111"}},
                 {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
                     "scheme": "http"
                 }}}
             ]}
-
         },
         {
             "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node",
             "device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
             "device_config": {"config_rules": [
-                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "<YOUR_MACHINE_IP>"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "22222"}},
                 {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
                     "scheme": "http"
                 }}}
             ]}
-
         },
-	{
+        {
             "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node",
             "device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
             "device_config": {"config_rules": [
-                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "<YOUR_MACHINE_IP>"}},
                 {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "33333"}},
                 {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
                     "scheme": "http"
                 }}}
             ]}
-
         }
     ],
     "links": [
-	{
-            "link_id": {"link_uuid": {"uuid": "QKD1/10.0.2.10:1001==QKD2/10.0.2.10:2001"}},
+        {
+            "link_id": {"link_uuid": {"uuid": "QKD1/<YOUR_MACHINE_IP>:1001==QKD2/<YOUR_MACHINE_IP>:2001"}},
             "link_endpoint_ids": [
-                {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}},
-                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}}
+                {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:1001"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2001"}}
             ]
         },
         {
-            "link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2001==QKD1/10.0.2.10:1001"}},
+            "link_id": {"link_uuid": {"uuid": "QKD2/<YOUR_MACHINE_IP>:2001==QKD1/<YOUR_MACHINE_IP>:1001"}},
             "link_endpoint_ids": [
-		        {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}},
-                {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}}
+                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2001"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:1001"}}
             ]
         },
-	{
-            "link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2002==QKD3/10.0.2.10:3001"}},
+        {
+            "link_id": {"link_uuid": {"uuid": "QKD2/<YOUR_MACHINE_IP>:2002==QKD3/<YOUR_MACHINE_IP>:3001"}},
             "link_endpoint_ids": [
-		        {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}},
-                {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}}
+                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2002"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:3001"}}
             ]
         },
-	{
-            "link_id": {"link_uuid": {"uuid": "QKD3/10.0.2.10:3001==QKD2/10.0.2.10:2002"}},
+        {
+            "link_id": {"link_uuid": {"uuid": "QKD3/<YOUR_MACHINE_IP>:3001==QKD2/<YOUR_MACHINE_IP>:2002"}},
             "link_endpoint_ids": [
-                {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}},
-                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}}
+                {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:3001"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2002"}}
             ]
         }
-
     ]
-}
+}
\ No newline at end of file
diff --git a/src/service/tests/qkd/test_functional_bootstrap.py b/src/service/tests/qkd/test_functional_bootstrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..daf35f0de5c56697f0380d1f32056a918bcba691
--- /dev/null
+++ b/src/service/tests/qkd/test_functional_bootstrap.py
@@ -0,0 +1,152 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, time, json, socket, re
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from tests.Fixtures import context_client, device_client # pylint: disable=unused-import
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+# Update the path to your QKD descriptor file
+DESCRIPTOR_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'descriptorQKD_links.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+def load_descriptor_with_runtime_ip(descriptor_file_path):
+    """
+    Load the descriptor file and replace placeholder IP with the machine's IP address.
+    """
+    with open(descriptor_file_path, 'r') as descriptor_file:
+        descriptor = descriptor_file.read()
+
+    # Get the current machine's IP address
+    try:
+        # Use socket to get the local IP address directly from the network interface
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        s.connect(("8.8.8.8", 80))
+        current_ip = s.getsockname()[0]
+        s.close()
+    except Exception as e:
+        raise Exception(f"Unable to get the IP address: {str(e)}")
+
+    # Replace all occurrences of <YOUR_MACHINE_IP> with the current IP
+    updated_descriptor = re.sub(r"<YOUR_MACHINE_IP>", current_ip, descriptor)
+
+    # Write updated descriptor back
+    with open(descriptor_file_path, 'w') as descriptor_file:
+        descriptor_file.write(updated_descriptor)
+
+    return json.loads(updated_descriptor)
+
+def load_and_process_descriptor(context_client, device_client, descriptor_file_path):
+    """
+    Function to load and process descriptor programmatically, similar to what WebUI does.
+    """
+    print(f"Loading descriptor from file: {descriptor_file_path}")
+    try:
+        # Update the descriptor with the runtime IP address
+        descriptor = load_descriptor_with_runtime_ip(descriptor_file_path)
+
+        # Initialize DescriptorLoader with the updated descriptor file
+        descriptor_loader = DescriptorLoader(
+            descriptors_file=descriptor_file_path, context_client=context_client, device_client=device_client
+        )
+
+        # Process and validate the descriptor
+        print("Processing the descriptor...")
+        results = descriptor_loader.process()
+        print(f"Descriptor processing results: {results}")
+
+        print("Checking descriptor load results...")
+        check_descriptor_load_results(results, descriptor_loader)
+
+        print("Validating descriptor...")
+        descriptor_loader.validate()
+        print("Descriptor validated successfully.")
+    except Exception as e:
+        LOGGER.error(f"Failed to load and process descriptor: {e}")
+        raise e
+
+def test_qkd_scenario_bootstrap(
+    context_client: ContextClient,  # pylint: disable=redefined-outer-name
+    device_client: DeviceClient,    # pylint: disable=redefined-outer-name
+) -> None:
+    """
+    This test validates that the QKD scenario is correctly bootstrapped.
+    """
+    print("Starting QKD scenario bootstrap test...")
+
+    # Check if context_client and device_client are instantiated
+    if context_client is None:
+        print("Error: context_client is not instantiated!")
+    else:
+        print(f"context_client is instantiated: {context_client}")
+
+    if device_client is None:
+        print("Error: device_client is not instantiated!")
+    else:
+        print(f"device_client is instantiated: {device_client}")
+
+    # Validate empty scenario
+    print("Validating empty scenario...")
+    validate_empty_scenario(context_client)
+
+    # Load the descriptor
+    load_and_process_descriptor(context_client, device_client, DESCRIPTOR_FILE_PATH)
+
+def test_qkd_devices_enabled(
+    context_client: ContextClient,  # pylint: disable=redefined-outer-name
+) -> None:
+    """
+    This test validates that the QKD devices are enabled.
+    """
+    print("Starting QKD devices enabled test...")
+
+    # Check if context_client is instantiated
+    if context_client is None:
+        print("Error: context_client is not instantiated!")
+    else:
+        print(f"context_client is instantiated: {context_client}")
+
+    DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+    num_devices = -1
+    num_devices_enabled, num_retry = 0, 0
+
+    while (num_devices != num_devices_enabled) and (num_retry < 10):
+        print(f"Attempt {num_retry + 1}: Checking device status...")
+
+        time.sleep(1.0)  # Add a delay to allow for device enablement
+
+        response = context_client.ListDevices(Empty())
+        num_devices = len(response.devices)
+        print(f"Total devices found: {num_devices}")
+
+        num_devices_enabled = 0
+        for device in response.devices:
+            if device.device_operational_status == DEVICE_OP_STATUS_ENABLED:
+                num_devices_enabled += 1
+
+        print(f"Devices enabled: {num_devices_enabled}/{num_devices}")
+        num_retry += 1
+
+    # Final check to ensure all devices are enabled
+    print(f"Final device status: {num_devices_enabled}/{num_devices} devices enabled.")
+    assert num_devices_enabled == num_devices
+    print("QKD devices enabled test completed.")
\ No newline at end of file
diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml
index dfccc4726b69c6346facb64c437364ff3b4b8aeb..5a0dd6883e3e788325e6e38854341a48f8e0c987 100644
--- a/src/tests/.gitlab-ci.yml
+++ b/src/tests/.gitlab-ci.yml
@@ -14,16 +14,18 @@
 
 # include the individual .gitlab-ci.yml of each end-to-end integration test
 include:
-  - local: '/src/tests/ofc22/.gitlab-ci.yml'
-  #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml'
-  - local: '/src/tests/ecoc22/.gitlab-ci.yml'
-  #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml'
-  #- local: '/src/tests/ofc23/.gitlab-ci.yml'
-  - local: '/src/tests/ofc24/.gitlab-ci.yml'
-  - local: '/src/tests/eucnc24/.gitlab-ci.yml'
-  #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml'
-  #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml'
-  #- local: '/src/tests/ofc25/.gitlab-ci.yml'
-  #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml'
+#  - local: '/src/tests/ofc22/.gitlab-ci.yml'
+#  #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml'
+#  - local: '/src/tests/ecoc22/.gitlab-ci.yml'
+#  #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml'
+#  #- local: '/src/tests/ofc23/.gitlab-ci.yml'
+#  - local: '/src/tests/ofc24/.gitlab-ci.yml'
+#  - local: '/src/tests/eucnc24/.gitlab-ci.yml'
+#  #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml'
+#  #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml'
+#  #- local: '/src/tests/ofc25/.gitlab-ci.yml'
+#  #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml'
+  - local: '/src/tests/qkd_end2end/.gitlab-ci.yml'
 
   - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml'
+  - local: '/src/tests/tools/mock_qkd_nodes/.gitlab-ci.yml'
diff --git a/src/tests/qkd_end2end/.gitlab-ci.yml b/src/tests/qkd_end2end/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fd1c65b84bd2c50481f823b38ae255583ec13ceb
--- /dev/null
+++ b/src/tests/qkd_end2end/.gitlab-ci.yml
@@ -0,0 +1,119 @@
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build qkd_end2end:
+  variables:
+    TEST_NAME: 'qkd_end2end'
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile .
+    - docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
+    - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/tests/${TEST_NAME}/**/*.{py,in,sh,yml}
+      - src/tests/${TEST_NAME}/Dockerfile
+      - .gitlab-ci.yml
+
+# Deploy TeraFlowSDN and Execute end-2-end test
+end2end_test qkd_end2end:
+  variables:
+    TEST_NAME: 'qkd_end2end'
+  stage: end2end_test
+  # Needs to run after build stage
+  needs:
+    - build qkd_end2end
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - docker rm -f crdb nats mock_qkd context device pathcomp-frontend pathcomp-backend service qkd_end2end || true
+    - docker network rm teraflowbridge || true
+
+  script:
+    # Create Docker network for inter-container communication
+    - docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 teraflowbridge
+
+    # Pull necessary images
+    - docker pull "$CI_REGISTRY_IMAGE/context:latest"
+    - docker pull "$CI_REGISTRY_IMAGE/device:latest"
+    - docker pull "$CI_REGISTRY_IMAGE/service:latest"
+    - docker pull "$CI_REGISTRY_IMAGE/pathcomp-frontend:latest"
+    - docker pull "$CI_REGISTRY_IMAGE/pathcomp-backend:latest"
+    - docker pull "$CI_REGISTRY_IMAGE/qkd_app:latest"
+    - docker pull "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
+
+    # Deploy CockroachDB (crdb) and NATS
+    - docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 cockroachdb/cockroach:latest-v22.2 start-single-node
+    - docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 nats:2.9 --http_port 8222 --user tfs --pass tfs123
+
+    # Wait for CockroachDB and NATS to initialize
+    - echo "Waiting for CockroachDB to be ready..."
+    - while ! docker logs crdb 2>&1 | grep -q 'CockroachDB node starting'; do sleep 1; done
+    - docker logs crdb
+    - echo "Waiting for NATS to be ready..."
+    - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
+    - docker logs nats
+
+    # Start mock QKD nodes
+    - docker run --name mock_qkd -d --network=teraflowbridge -v "$PWD/src/tests/tools/mock_qkd_nodes:/app" python:3.9-slim bash -c "cd /app && ./start.sh"
+
+    # Wait for mock QKD nodes to initialize
+    - echo "Waiting for mock QKD nodes to be ready..."
+    - sleep 10
+
+    # Deploy TeraFlowSDN services
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+
+    # Deploy Context Service
+    - docker run --name context -d -p 1010:1010 --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" --env "MB_BACKEND=nats" --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222" --network=teraflowbridge $CI_REGISTRY_IMAGE/context:latest
+    - CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+
+    # Deploy Device Service
+    - docker run --name device -d -p 2020:2020 --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" --network=teraflowbridge $CI_REGISTRY_IMAGE/device:latest
+    - DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+
+    # Deploy PathComp Services (frontend and backend)
+    - docker run --name pathcomp-backend -d -p 8081:8081 --network=teraflowbridge $CI_REGISTRY_IMAGE/pathcomp-backend:latest
+    - PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - docker run --name pathcomp-frontend -d -p 10020:10020 --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}" --env "PATHCOMP_BACKEND_PORT=8081" --network=teraflowbridge $CI_REGISTRY_IMAGE/pathcomp-frontend:latest
+    - PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+
+    # Deploy Service Component
+    - docker run --name service -d -p 3030:3030 --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" --env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}" --env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}" --volume "$PWD/src/service/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/service:latest
+
+    # Wait for services to initialize
+    - sleep 10
+
+    # Run end-to-end tests for QKD application
+    - docker run --name ${TEST_NAME} -t --network=teraflowbridge --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" $CI_REGISTRY_IMAGE/${TEST_NAME}:latest
+
+  after_script:
+    # Dump logs for TeraFlowSDN components (|| true: a container may not exist if an earlier step failed)
+    - docker logs context || true
+    - docker logs device || true
+    - docker logs pathcomp-frontend || true
+    - docker logs pathcomp-backend || true
+    - docker logs service || true
+
+    # Dump logs for QKD mock nodes
+    - docker logs mock_qkd || true
+
+    # Clean up
+    - docker rm -f context device pathcomp-frontend pathcomp-backend service mock_qkd crdb nats || true
+    - docker network rm teraflowbridge || true
+    - docker volume prune --force
+    - docker image prune --force
+
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+  artifacts:
+      when: always
+      reports:
+        junit: ./src/tests/${TEST_NAME}/report_*.xml
diff --git a/src/tests/qkd_end2end/deploy_specs.sh b/src/tests/qkd_end2end/deploy_specs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..face2f5b042b379b72c1544384461dc0f08d79c7
--- /dev/null
+++ b/src/tests/qkd_end2end/deploy_specs.sh
@@ -0,0 +1,213 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device pathcomp service nbi webui"
+
+# Uncomment to activate Monitoring (old)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
+
+# Uncomment to activate QoS Profiles
+#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
+
+# Uncomment to activate BGP-LS Speaker
+#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
+
+# Uncomment to activate Optical Controller
+#   To manage optical connections, "service" requires "opticalcontroller" to be deployed
+#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+#   "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
+#fi
+
+# Uncomment to activate ZTP
+#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
+
+# Uncomment to activate Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
+# Uncomment to activate Forecaster
+#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
+
+# Uncomment to activate E2E Orchestrator
+#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
+
+# Uncomment to activate VNT Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager"
+
+# Uncomment to activate DLT and Interdomain
+#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt"
+#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then
+#    export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk"
+#    export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem"
+#    export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt"
+#fi
+
+# Activate QKD App
+#   To manage QKD Apps, "service" requires "qkd_app" to be deployed
+#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+#   "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
+if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+    BEFORE="${TFS_COMPONENTS% service*}"
+    AFTER="${TFS_COMPONENTS#* service}"
+    export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
+fi
+
+# Uncomment to activate Load Generator
+#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator"
+
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroackDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroackDB Postgre SQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Enable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY="YES"
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the external port QuestDB Postgre SQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
+
+
+# ----- Apache Kafka -----------------------------------------------------------
+
+# Set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE="kafka"
+
+# Set the port Apache Kafka server will be exposed to.
+export KFK_SERVER_PORT="9092"
+
+# Set the flag to YES for redeploying of Apache Kafka
+export KFK_REDEPLOY=""
diff --git a/src/tests/qkd_end2end/redeploy-tfs.sh b/src/tests/qkd_end2end/redeploy-tfs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7433ae4a1445b97b29bc17465f08b392573b903a
--- /dev/null
+++ b/src/tests/qkd_end2end/redeploy-tfs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source ~/tfs-ctrl/src/tests/qkd_end2end/deploy_specs.sh
+./deploy/all.sh
diff --git a/src/tests/qkd_end2end/topology_qkd.json b/src/tests/qkd_end2end/topology_qkd.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa301baff1b0f6e793eec37ffb37b5641fb41d3e
--- /dev/null
+++ b/src/tests/qkd_end2end/topology_qkd.json
@@ -0,0 +1,52 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "11111"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "22222"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "33333"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}}
+            ]}
+        }
+    ],
+    "links": [
+        {"link_id": {"link_uuid": {"uuid": "QKD1/10.0.2.10:1001==QKD2/10.0.2.10:2001"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}},
+            {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2001==QKD1/10.0.2.10:1001"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}},
+            {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2002==QKD3/10.0.2.10:3001"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}},
+            {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}}
+        ]},
+        {"link_id": {"link_uuid": {"uuid": "QKD3/10.0.2.10:3001==QKD2/10.0.2.10:2002"}}, "link_endpoint_ids": [
+            {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}},
+            {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}}
+        ]}
+    ]
+}
diff --git a/src/tests/tools/mock_qkd_node/.gitlab-ci.yml b/src/tests/tools/mock_qkd_node/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..787f270722c7232824e5801f53462bf85248e320
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/.gitlab-ci.yml
@@ -0,0 +1,39 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build mock_qkd_node:
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$CI_REGISTRY_IMAGE/mock_qkd_node:test" -f ./src/tests/tools/mock_qkd_node/Dockerfile .
+    - docker push "$CI_REGISTRY_IMAGE/mock_qkd_node:test"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/tests/tools/mock_qkd_node/**/*.{py,in,yml,yaml}
+      - src/tests/tools/mock_qkd_node/Dockerfile
+      - src/device/**/*.{py,in,yml}
+      - src/device/Dockerfile
+      - src/device/tests/*.py
+      - src/qkd_app/**/*.{py,in,yml}
+      - src/qkd_app/Dockerfile
+      - src/qkd_app/tests/*.py
+      - .gitlab-ci.yml
diff --git a/src/tests/tools/mock_qkd_node/Dockerfile b/src/tests/tools/mock_qkd_node/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..510de08316d54f074eade12c7aed3a39f616fb37
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/Dockerfile
@@ -0,0 +1,58 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install g++ git build-essential cmake libpcre2-dev python3-dev python3-cffi && \
+    rm -rf /var/lib/apt/lists/*
+
+# Download, build and install libyang. Note that APT package is outdated
+# - Ref: https://github.com/CESNET/libyang
+# - Ref: https://github.com/CESNET/libyang-python/
+RUN mkdir -p /var/libyang
+RUN git clone https://github.com/CESNET/libyang.git /var/libyang
+WORKDIR /var/libyang
+RUN git fetch
+RUN git checkout v2.1.148
+RUN mkdir -p /var/libyang/build
+WORKDIR /var/libyang/build
+RUN cmake -D CMAKE_BUILD_TYPE:String="Release" ..
+RUN make
+RUN make install
+RUN ldconfig
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Create component sub-folders, and copy content
+RUN mkdir -p /var/teraflow/mock_qkd_node/{data,yang}
+WORKDIR /var/teraflow/mock_qkd_node
+COPY yang/. yang/
+COPY requirements.in requirements.in
+COPY wsgi.py wsgi.py
+COPY YangValidator.py YangValidator.py
+
+# Get specific Python packages
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Start the service
+ENTRYPOINT ["gunicorn", "--workers", "1", "--bind", "0.0.0.0:8080", "wsgi:app"]
diff --git a/src/tests/tools/mock_qkd_node/QkdNode.py b/src/tests/tools/mock_qkd_node/QkdNode.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c4f27696206428a23136a682e21c4cfb67037eb
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/QkdNode.py
@@ -0,0 +1,46 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# REST-API resource implementing minimal support for the software-defined QKD node
+# information model specified in ETSI GS QKD 015 V2.1.1.
+
+import json, os
+from flask import jsonify, make_response, request
+from flask_restful import Resource
+
+
+DATA_FILE_PATH = os.environ.get('DATA_FILE_PATH')
+if DATA_FILE_PATH is None:
+    raise Exception('DataFile({:s}) is not defined'.format(str(DATA_FILE_PATH)))
+
+if not os.path.isfile(DATA_FILE_PATH):
+    raise Exception('DataFile({:s}) not found'.format(str(DATA_FILE_PATH)))
+
+with open(DATA_FILE_PATH, mode='r', encoding='UTF-8') as fp:
+    QKD_NODE = json.load(fp)
+
+class QkdNode(Resource):
+    def get(self):
+        return make_response(jsonify(QKD_NODE), 200)
+
+    def post(self):
+        json_request = request.get_json()
+        slice_id = json_request["network-slice-services"]["slice-service"][0]["id"]
+        QKD_NODE[slice_id] = json_request
+        return make_response(jsonify({}), 201)
+
+    def delete(self, slice_id : str):
+        slice = QKD_NODE.pop(slice_id, None)
+        data, status = ({}, 404) if slice is None else (slice, 204)
+        return make_response(jsonify(data), status)
diff --git a/src/tests/tools/mock_qkd_node/README.md b/src/tests/tools/mock_qkd_node/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7087fe061f2ca89879c819fede035763695943f1
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/README.md
@@ -0,0 +1,34 @@
+# Mock QKD Node
+
+This Mock implements very basic support for the software-defined QKD node information models specified in ETSI GS QKD 015 V2.1.1.
+
+The aim of this mock is to enable testing the TFS QKD Framework with an emulated data plane.
+
+
+## Build the Mock QKD Node Docker image
+```bash
+./build.sh
+```
+
+## Run the Mock QKD Node as a container:
+```bash
+docker network create --driver bridge --subnet=172.254.252.0/24 --gateway=172.254.252.254 tfs-qkd-net-mgmt
+
+docker run --name qkd-node-01 --detach --publish 11111:8080 \
+  --network=tfs-qkd-net-mgmt --ip=172.254.252.101 \
+  --env "DATA_FILE_PATH=/var/teraflow/mock_qkd_node/data/database.json" \
+  --volume "$PWD/src/tests/tools/mock_qkd_node/data/database-01.json:/var/teraflow/mock_qkd_node/data/database.json" \
+  mock-qkd-node:test
+
+docker run --name qkd-node-02 --detach --publish 22222:8080 \
+  --network=tfs-qkd-net-mgmt --ip=172.254.252.102 \
+  --env "DATA_FILE_PATH=/var/teraflow/mock_qkd_node/data/database.json" \
+  --volume "$PWD/src/tests/tools/mock_qkd_node/data/database-02.json:/var/teraflow/mock_qkd_node/data/database.json" \
+  mock-qkd-node:test
+
+docker run --name qkd-node-03 --detach --publish 33333:8080 \
+  --network=tfs-qkd-net-mgmt --ip=172.254.252.103 \
+  --env "DATA_FILE_PATH=/var/teraflow/mock_qkd_node/data/database.json" \
+  --volume "$PWD/src/tests/tools/mock_qkd_node/data/database-03.json:/var/teraflow/mock_qkd_node/data/database.json" \
+  mock-qkd-node:test
+```
diff --git a/src/tests/tools/mock_qkd_nodes/YangValidator.py b/src/tests/tools/mock_qkd_node/YangValidator.py
similarity index 99%
rename from src/tests/tools/mock_qkd_nodes/YangValidator.py
rename to src/tests/tools/mock_qkd_node/YangValidator.py
index 4948239ed7430685699af2a7a4fafbcffd7dbb25..6214074c8e1d470f249ae86887384342cfcca970 100644
--- a/src/tests/tools/mock_qkd_nodes/YangValidator.py
+++ b/src/tests/tools/mock_qkd_node/YangValidator.py
@@ -26,8 +26,6 @@ class YangValidator:
 
         for mod in mods:
             mod.feature_enable_all()
-        
-
 
     def parse_to_dict(self, message : Dict) -> Dict:
         dnode : Optional[libyang.DNode] = self._yang_module.parse_data_dict(
diff --git a/src/tests/tools/mock_qkd_node/__init__.py b/src/tests/tools/mock_qkd_node/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/tools/mock_qkd_nodes/start.sh b/src/tests/tools/mock_qkd_node/build.sh
similarity index 57%
rename from src/tests/tools/mock_qkd_nodes/start.sh
rename to src/tests/tools/mock_qkd_node/build.sh
index f0409747ca35d7b39c1bfa69a1f76df9cc2415ca..8360d93b9a4a44de4ecdbe12b8137bad496df69d 100755
--- a/src/tests/tools/mock_qkd_nodes/start.sh
+++ b/src/tests/tools/mock_qkd_node/build.sh
@@ -13,30 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-#!/bin/bash
-cd "$(dirname "$0")"
-
-# Function to kill all background processes
-killbg() {
-    for p in "${pids[@]}" ; do
-        kill "$p";
-    done
-}
-
-trap killbg EXIT
-pids=()
-
-# Set FLASK_APP and run the Flask instances on different ports
-export FLASK_APP=wsgi
-flask run --host 0.0.0.0 --port 11111 & 
-pids+=($!)
-
-flask run --host 0.0.0.0 --port 22222 & 
-pids+=($!)
-
-flask run --host 0.0.0.0 --port 33333 & 
-pids+=($!)
-
-# Wait for all background processes to finish
-wait
+# Make folder containing the script the root folder for its execution
+cd "$(dirname "$0")"
 
+docker build -t mock-qkd-node:test -f Dockerfile .
+docker tag mock-qkd-node:test localhost:32000/tfs/mock-qkd-node:test
+docker push localhost:32000/tfs/mock-qkd-node:test
diff --git a/src/tests/tools/mock_qkd_node/data/database-01.json b/src/tests/tools/mock_qkd_node/data/database-01.json
new file mode 100644
index 0000000000000000000000000000000000000000..1f78f7d6d52f237d19a52d351804968532ea2ca8
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/data/database-01.json
@@ -0,0 +1,20 @@
+{
+    "node": {"qkdn_id": "00000001-0000-0000-0000-000000000000"},
+    "qkdn_capabilities": {},
+    "qkd_applications": {"qkd_app": []},
+    "qkd_interfaces": {
+        "qkd_interface": [
+            {
+                "qkdi_id": "100",
+                "qkdi_att_point": {},
+                "qkdi_capabilities": {}
+            },
+            {
+                "qkdi_id": "101",
+                "qkdi_att_point": {"device": current_ip, "port": "1001"},
+                "qkdi_capabilities": {}
+            }
+        ]
+    },
+    "qkd_links": {"qkd_link": []}
+}
diff --git a/src/tests/tools/mock_qkd_node/data/database-02.json b/src/tests/tools/mock_qkd_node/data/database-02.json
new file mode 100644
index 0000000000000000000000000000000000000000..4e3a00724b9cb59c7c06ab88804619fafb327512
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/data/database-02.json
@@ -0,0 +1,25 @@
+{
+    "node": {"qkdn_id": "00000002-0000-0000-0000-000000000000"},
+    "qkdn_capabilities": {},
+    "qkd_applications": {"qkd_app": []},
+    "qkd_interfaces": {
+        "qkd_interface": [
+            {
+                "qkdi_id": "200",
+                "qkdi_att_point": {},
+                "qkdi_capabilities": {}
+            },
+            {
+                "qkdi_id": "201",
+                "qkdi_att_point": {"device": current_ip, "port": "2001"},
+                "qkdi_capabilities": {}
+            },
+            {
+                "qkdi_id": "202",
+                "qkdi_att_point": {"device": current_ip, "port": "2002"},
+                "qkdi_capabilities": {}
+            }
+        ]
+    },
+    "qkd_links": {"qkd_link": []}
+}
diff --git a/src/tests/tools/mock_qkd_node/data/database-03.json b/src/tests/tools/mock_qkd_node/data/database-03.json
new file mode 100644
index 0000000000000000000000000000000000000000..b6dbb7aa514ecedbb21061264931bc116f9007e2
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/data/database-03.json
@@ -0,0 +1,20 @@
+{
+    "node": {"qkdn_id": "00000003-0000-0000-0000-000000000000"},
+    "qkdn_capabilities": {},
+    "qkd_applications": {"qkd_app": []},
+    "qkd_interfaces": {
+        "qkd_interface": [
+            {
+                "qkdi_id": "300",
+                "qkdi_att_point": {},
+                "qkdi_capabilities": {}
+            },
+            {
+                "qkdi_id": "301",
+                "qkdi_att_point": {"device": current_ip, "port": "3001"},
+                "qkdi_capabilities": {}
+            }
+        ]
+    },
+    "qkd_links": {"qkd_link": []}
+}
diff --git a/src/tests/tools/mock_qkd_node/requirements.in b/src/tests/tools/mock_qkd_node/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..cb2f89992568a95a38b6c19d9632a5705320400c
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/requirements.in
@@ -0,0 +1,19 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Flask==2.1.3
+Flask-HTTPAuth==4.5.0
+Flask-RESTful==0.3.9
+gunicorn==23.0.0
+werkzeug==2.3.7
diff --git a/src/tests/tools/mock_qkd_node/run.sh b/src/tests/tools/mock_qkd_node/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ecef1d5d52f462a5f92f8f700d61b5b7b322a75d
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/run.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make folder containing the script the root folder for its execution
+cd "$(dirname "$0")"
+
+gunicorn --workers 1 --bind 0.0.0.0:8080 wsgi:app
diff --git a/src/tests/tools/mock_qkd_node/tests.py b/src/tests/tools/mock_qkd_node/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..b112595dd42e3e73257cf8e22c0093eb678829f9
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/tests.py
@@ -0,0 +1,43 @@
+import json
+from mock import requests
+import pyangbind.lib.pybindJSON as enc
+from pyangbind.lib.serialise import pybindJSONDecoder as dec
+from yang.sbi.qkd.templates.etsi_qkd_sdn_node import etsi_qkd_sdn_node
+
+module = etsi_qkd_sdn_node()
+url = 'https://1.1.1.1/restconf/data/etsi-qkd-sdn-node:qkd_node/'
+
+# Get node all info
+z = requests.get(url).json()
+var = dec.load_json(z, None, None, obj=module)
+print(enc.dumps(var))
+
+# Reset module variable because it is already filled
+module = etsi_qkd_sdn_node()
+
+# Get node basic info
+node = module.qkd_node
+z = requests.get(url).json()
+var = dec.load_json(z, None, None, obj=node)
+print(enc.dumps(var))
+
+# Get all apps
+apps = node.qkd_applications
+z = requests.get(url + 'qkd_applications').json()
+var = dec.load_json(z, None, None, obj=apps)
+print(enc.dumps(var))
+
+# Edit app 0
+app = apps.qkd_app['00000000-0001-0000-0000-000000000000']
+app.client_app_id = 'id_0'
+requests.put(url + 'qkd_applications/qkd_app=00000000-0001-0000-0000-000000000000', json=json.loads(enc.dumps(app)))
+
+# Create app 1
+app = apps.qkd_app.add('00000000-0001-0000-0000-000000000001')
+requests.post(url + 'qkd_applications/qkd_app=00000000-0001-0000-0000-000000000001', json=json.loads(enc.dumps(app)))
+
+# Get all apps
+apps = node.qkd_applications
+z = requests.get(url + 'qkd_applications').json()
+var = dec.load_json(z, None, None, obj=apps)
+print(enc.dumps(var))
diff --git a/src/tests/tools/mock_qkd_node/wsgi.py b/src/tests/tools/mock_qkd_node/wsgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2574218af2244ce22c7536060c7136f6bf339ef
--- /dev/null
+++ b/src/tests/tools/mock_qkd_node/wsgi.py
@@ -0,0 +1,211 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools, json, logging, os, time
+from flask import Flask, request
+from flask_restful import Api
+from ResourceNetworkSlices import NetworkSliceService, NetworkSliceServices
+from ResourceConnectionGroups import ConnectionGroup
+from YangValidator import YangValidator
+
+LOG_LEVEL = logging.DEBUG
+
+logging.basicConfig(
+    level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
+)
+LOGGER = logging.getLogger(__name__)
+
+logging.getLogger('werkzeug').setLevel(logging.WARNING)
+
+
+
+
+BASE_URL = '/restconf/data/etsi-qkd-sdn-node:qkd_node'
+
+yang_validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
+
+def log_request(logger: logging.Logger, response):
+    timestamp = time.strftime("[%Y-%b-%d %H:%M]")
+    logger.info(
+        "%s %s %s %s %s",
+        timestamp,
+        request.remote_addr,
+        request.method,
+        request.full_path,
+        response.status,
+    )
+    return response
+
+app = Flask(__name__)
+app.after_request(functools.partial(log_request, LOGGER))
+
+api = Api(app, prefix=BASE_URL)
+api.add_resource(NetworkSliceServices, "")
+api.add_resource(NetworkSliceService, "/slice-service=<string:slice_id>")
+api.add_resource(
+    ConnectionGroup,
+    "/slice-service=<string:slice_id>/connection-groups/connection-group=<string:connection_group_id>",
+)
+
+
+
+
+
+
+
+
+
+
+
+def get_side_effect(url):
+    steps = url.removeprefix('https://').removeprefix('http://').rstrip('/')
+    parts = steps.split('/')
+    
+    # Ensure there are enough parts to unpack
+    if len(parts) < 4:
+        raise ValueError(f"Expected at least 4 parts in the URL, got {len(parts)}: {steps}")
+
+    ip_port, _, _, header, *steps = parts
+
+    header_splitted = header.split(':')
+
+    module = header_splitted[0]
+    assert(module == 'etsi-qkd-sdn-node')
+
+    if ip_port.startswith('127.0.0.1'):
+        ip_port = ip_port.replace('127.0.0.1', current_ip)
+
+    tree = {'qkd_node': nodes[ip_port]['node'].copy()}
+
+    if len(header_splitted) == 1 or not header_splitted[1]:
+        value = nodes[ip_port].copy()
+        value.pop('node')
+        tree['qkd_node'].update(value)
+
+        return tree, tree
+    
+    root = header_splitted[1]
+    assert(root == 'qkd_node')
+
+    if not steps:
+        return tree, tree
+
+    endpoint, *steps = steps
+    
+    value = nodes[ip_port][endpoint]
+
+    if not steps:
+        return_value = {endpoint: value}
+        tree['qkd_node'].update(return_value)
+
+        return return_value, tree
+
+    raise Exception('Url too long')
+
+def edit(from_dict, to_dict, create):
+    for key, value in from_dict.items():
+        if isinstance(value, dict):
+            if key not in to_dict and create:
+                to_dict[key] = {}
+            edit(from_dict[key], to_dict[key], create)
+        elif isinstance(value, list):
+            to_dict[key].extend(value)
+        else:
+            to_dict[key] = value
+
+@app.get('/', defaults={'path': ''})
+@app.get("/<string:path>")
+@app.get('/<path:path>')
+def get(path):
+    try:
+        msg, msg_validate = get_side_effect(request.base_url)
+        print(msg_validate)
+        yang_validator.parse_to_dict(msg_validate)
+        return msg
+    except ValueError as e:
+        return {'error': str(e)}, 400
+
+@app.post('/', defaults={'path': ''})
+@app.post("/<string:path>")
+@app.post('/<path:path>')
+def post(path):
+    try:
+        steps = request.base_url.removeprefix('https://').removeprefix('http://').rstrip('/')
+        parts = steps.split('/')
+
+        # Ensure there are enough parts to unpack
+        if len(parts) < 4:
+            return {'success': False, 'reason': f"Expected at least 4 parts in the URL, got {len(parts)}: {steps}"}
+
+        ip_port, _, _, header, *steps = parts
+
+        module, root = header.split(':')
+
+        assert module == 'etsi-qkd-sdn-node'
+        assert root == 'qkd_node'
+
+        if not steps:
+            edit(request.json, nodes[ip_port]['node'], True)
+            return {'success': True, 'reason': ''}
+
+        endpoint, *steps = steps
+
+        if not steps:
+            edit(request.json[endpoint], nodes[ip_port][endpoint], True)
+            return {'success': True, 'reason': ''}
+
+        return {'success': False, 'reason': 'Url too long'}
+
+    except Exception as e:
+        return {'success': False, 'reason': str(e)}
+
+def edit_side_effect(url, json, create):
+    steps = url.removeprefix('https://').removeprefix('http://').rstrip('/')
+    parts = steps.split('/')
+
+    # Ensure there are enough parts to unpack
+    if len(parts) < 4:
+        raise ValueError(f"Expected at least 4 parts in the URL, got {len(parts)}: {steps}")
+
+    ip_port, _, _, header, *steps = parts
+
+    module, root = header.split(':')
+
+    assert(module == 'etsi-qkd-sdn-node')
+    assert(root == 'qkd_node')
+
+    if not steps:
+        edit(json, nodes[ip_port]['node'], create)
+        return
+
+    endpoint, *steps = steps
+
+    if not steps:
+        edit(json[endpoint], nodes[ip_port][endpoint], create)
+        return
+
+    raise Exception('Url too long')
+
+@app.route('/', defaults={'path': ''}, methods=['PUT', 'PATCH'])
+@app.route("/<string:path>", methods=['PUT', 'PATCH'])
+@app.route('/<path:path>', methods=['PUT', 'PATCH'])
+def patch(path):
+    success = True
+    reason = ''
+    try:
+        edit_side_effect(request.base_url, request.json, False)
+    except Exception as e:
+        reason = str(e)
+        success = False
+    return {'success': success, 'reason': reason}
diff --git a/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang b/src/tests/tools/mock_qkd_node/yang/etsi-qkd-node-types.yang
similarity index 100%
rename from src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-node-types.yang
rename to src/tests/tools/mock_qkd_node/yang/etsi-qkd-node-types.yang
diff --git a/src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang b/src/tests/tools/mock_qkd_node/yang/etsi-qkd-sdn-node.yang
similarity index 100%
rename from src/tests/tools/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang
rename to src/tests/tools/mock_qkd_node/yang/etsi-qkd-sdn-node.yang
diff --git a/src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang b/src/tests/tools/mock_qkd_node/yang/ietf-inet-types.yang
similarity index 100%
rename from src/tests/tools/mock_qkd_nodes/yang/ietf-inet-types.yang
rename to src/tests/tools/mock_qkd_node/yang/ietf-inet-types.yang
diff --git a/src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang b/src/tests/tools/mock_qkd_node/yang/ietf-yang-types.yang
similarity index 100%
rename from src/tests/tools/mock_qkd_nodes/yang/ietf-yang-types.yang
rename to src/tests/tools/mock_qkd_node/yang/ietf-yang-types.yang
diff --git a/src/tests/tools/mock_qkd_nodes/wsgi.py b/src/tests/tools/mock_qkd_nodes/wsgi.py
deleted file mode 100644
index fde3c6cd024e96cb7693bb0f3036757b3177e353..0000000000000000000000000000000000000000
--- a/src/tests/tools/mock_qkd_nodes/wsgi.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from flask import Flask, request
-from YangValidator import YangValidator
-
-app = Flask(__name__)
-
-
-yang_validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
-
-
-nodes = {
-    '10.0.2.10:11111': {'node': {
-            'qkdn_id': '00000001-0000-0000-0000-000000000000',
-        },
-        'qkdn_capabilities': {
-        },
-        'qkd_applications': {
-            'qkd_app': [
-                {
-                    'app_id': '00000001-0001-0000-0000-000000000000',           
-                    'client_app_id': [],
-                    'app_statistics': {
-                        'statistics': []
-                    },
-                    'app_qos': {
-                    },
-                    'backing_qkdl_id': []
-                }
-            ]
-        },
-        'qkd_interfaces': {
-            'qkd_interface': [
-                {
-                    'qkdi_id': '100',
-                    'qkdi_att_point': {
-                    },
-                    'qkdi_capabilities': {
-                    }
-                },
-                {
-                    'qkdi_id': '101',
-                    'qkdi_att_point': {
-                        'device':'10.0.2.10',
-                        'port':'1001'
-                    },
-                    'qkdi_capabilities': {
-                    }
-                }
-            ]
-        },
-        'qkd_links': {
-            'qkd_link': [
-
-            ]
-        }
-    },
-
-    '10.0.2.10:22222': {'node': {
-            'qkdn_id': '00000002-0000-0000-0000-000000000000',
-        },
-        'qkdn_capabilities': {
-        },
-        'qkd_applications': {
-            'qkd_app': [
-                {
-                    'app_id': '00000002-0001-0000-0000-000000000000',           
-                    'client_app_id': [],
-                    'app_statistics': {
-                        'statistics': []
-                    },
-                    'app_qos': {
-                    },
-                    'backing_qkdl_id': []
-                }
-            ]
-        },
-        'qkd_interfaces': {
-            'qkd_interface': [
-                {
-                    'qkdi_id': '200',
-                    'qkdi_att_point': {
-                    },
-                    'qkdi_capabilities': {
-                    }
-                },
-                {
-                    'qkdi_id': '201',
-                    'qkdi_att_point': {
-                        'device':'10.0.2.10',
-                        'port':'2001'
-                    },
-                    'qkdi_capabilities': {
-                    }
-                },
-                {
-                    'qkdi_id': '202',
-                    'qkdi_att_point': {
-                        'device':'10.0.2.10',
-                        'port':'2002'
-                    },
-                    'qkdi_capabilities': {
-                    }
-                }
-            ]
-        },
-        'qkd_links': {
-            'qkd_link': [
-
-            ] 
-        }
-    },
-
-    '10.0.2.10:33333': {'node': {
-            'qkdn_id': '00000003-0000-0000-0000-000000000000',
-        },
-        'qkdn_capabilities': {
-        },
-        'qkd_applications': {
-            'qkd_app': [
-                {
-                    'app_id': '00000003-0001-0000-0000-000000000000',           
-                    'client_app_id': [],
-                    'app_statistics': {
-                        'statistics': []
-                    },
-                    'app_qos': {
-                    },
-                    'backing_qkdl_id': []
-                }
-            ]
-        },
-        'qkd_interfaces': {
-            'qkd_interface': [
-                {
-                    'qkdi_id': '300',
-                    'qkdi_att_point': {
-                    },
-                    'qkdi_capabilities': {
-                    }
-                },
-                {
-                    'qkdi_id': '301',
-                    'qkdi_att_point': {
-                        'device':'10.0.2.10',
-                        'port':'3001'
-                    },
-                    'qkdi_capabilities': {
-                    }
-                }
-            ]
-        },
-        'qkd_links': {
-            'qkd_link': [
-
-            ]
-        }
-    }
-}
-
-
-def get_side_effect(url):
-
-    steps = url.lstrip('https://').lstrip('http://').rstrip('/')
-    ip_port, _, _, header, *steps = steps.split('/')
-
-    header_splitted = header.split(':')
-
-    module = header_splitted[0]
-    assert(module == 'etsi-qkd-sdn-node')
-
-    tree = {'qkd_node': nodes[ip_port]['node'].copy()}
-
-    if len(header_splitted) == 1 or not header_splitted[1]:
-        value = nodes[ip_port].copy()
-        value.pop('node')
-        tree['qkd_node'].update(value)
-
-        return tree, tree
-    
-    root = header_splitted[1]
-    assert(root == 'qkd_node')
-
-    if not steps:
-        return tree, tree
-
-
-    endpoint, *steps = steps
-    
-    value = nodes[ip_port][endpoint]
-
-    if not steps:
-        return_value = {endpoint:value}
-        tree['qkd_node'].update(return_value)
-
-        return return_value, tree
-
-    
-
-    '''
-    element, *steps = steps
-
-    container, key = element.split('=')
-    
-    # value = value[container][key]
-
-    if not steps:
-        return_value['qkd_node'][endpoint] = [value]
-        return return_value
-
-    '''
-    raise Exception('Url too long')
-
-        
-
-def edit(from_dict, to_dict, create):
-    for key, value in from_dict.items():
-        if isinstance(value, dict):
-            if key not in to_dict and create:
-                to_dict[key] = {}
-            edit(from_dict[key], to_dict[key], create)
-        elif isinstance(value, list):
-            to_dict[key].extend(value)
-        else:
-            to_dict[key] = value
-
-
-
-def edit_side_effect(url, json, create):
-    steps = url.lstrip('https://').lstrip('http://').rstrip('/')
-    ip_port, _, _, header, *steps = steps.split('/')
-
-    module, root = header.split(':')
-
-    assert(module == 'etsi-qkd-sdn-node')
-    assert(root == 'qkd_node')
-
-    if not steps:
-        edit(json, nodes[ip_port]['node'])
-        return
-
-    endpoint, *steps = steps
-
-    if not steps:
-        edit(json[endpoint], nodes[ip_port][endpoint], create)
-        return
-
-
-    '''
-    element, *steps = steps
-
-    container, key = element.split('=')
-
-    if not steps:
-        if key not in nodes[ip_port][endpoint][container] and create:
-            nodes[ip_port][endpoint][container][key] = {}
-
-        edit(json, nodes[ip_port][endpoint][container][key], create)
-        return 0
-    '''
-    
-    raise Exception('Url too long')
-
-
-
-
-
-
-@app.get('/', defaults={'path': ''})
-@app.get("/<string:path>")
-@app.get('/<path:path>')
-def get(path):
-    msg, msg_validate = get_side_effect(request.base_url)
-    print(msg_validate)
-    yang_validator.parse_to_dict(msg_validate)
-    return msg
-
-
-@app.post('/', defaults={'path': ''})
-@app.post("/<string:path>")
-@app.post('/<path:path>')
-def post(path):
-    success = True
-    reason = ''
-    try:
-        edit_side_effect(request.base_url, request.json, True)
-    except Exception as e:
-        reason = str(e)
-        success = False
-    return {'success': success, 'reason': reason}
-    
-
-
-@app.route('/', defaults={'path': ''}, methods=['PUT', 'PATCH'])
-@app.route("/<string:path>", methods=['PUT', 'PATCH'])
-@app.route('/<path:path>', methods=['PUT', 'PATCH'])
-def patch(path):
-    success = True
-    reason = ''
-    try:
-        edit_side_effect(request.base_url, request.json, False)
-    except Exception as e:
-        reason = str(e)
-        success = False
-    return {'success': success, 'reason': reason}
-
-
-
-
-
-# import json
-# from mock import requests
-# import pyangbind.lib.pybindJSON as enc
-# from pyangbind.lib.serialise import pybindJSONDecoder as dec
-# from yang.sbi.qkd.templates.etsi_qkd_sdn_node import etsi_qkd_sdn_node
-
-# module = etsi_qkd_sdn_node()
-# url = 'https://1.1.1.1/restconf/data/etsi-qkd-sdn-node:'
-
-# # Get node all info
-# z = requests.get(url).json()
-# var = dec.load_json(z, None, None, obj=module)
-# print(enc.dumps(var))
-
-
-# Reset module variable because it is already filled
-# module = etsi_qkd_sdn_node()
-
-# # Get node basic info
-# node = module.qkd_node
-# z = requests.get(url + 'qkd_node').json()
-# var = dec.load_json(z, None, None, obj=node)
-# print(enc.dumps(var))
-
-
-# # Get all apps
-# apps = node.qkd_applications
-# z = requests.get(url + 'qkd_node/qkd_applications').json()
-# var = dec.load_json(z, None, None, obj=apps)
-# print(enc.dumps(var))
-
-# # Edit app 0
-# app = apps.qkd_app['00000000-0001-0000-0000-000000000000']
-# app.client_app_id = 'id_0'
-# requests.put(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000000', json=json.loads(enc.dumps(app)))
-
-# # Create app 1
-# app = apps.qkd_app.add('00000000-0001-0000-0000-000000000001')
-# requests.post(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000001', json=json.loads(enc.dumps(app)))
-
-# # Get all apps
-# apps = node.qkd_applications
-# z = requests.get(url + 'qkd_node/qkd_applications').json()
-# var = dec.load_json(z, None, None, obj=apps)
-# print(enc.dumps(var))