diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..b895f09e10bf49f035ccf58760f4518f3ccf955f
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,5 @@
+__pycache__/
+*.pyc
+*.db
+.env
+.git/
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 9eac55be6c387aa78941998cb0664575b94a65fd..d4f18656d89df0abc25f8d93dcf2336fce662355 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@ __pycache__/
 swagger/__pycache__/
 src/__pycache__/
 venv/
-
+.env
+slice.db
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a50341ed8b82e398eaec3a3f02ed93d75168b411
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,81 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+stages:
+  - build
+  - unit_test
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build nsc:
+  variables:
+    IMAGE_NAME: 'nsc'
+    IMAGE_TAG: 'test'
+  stage: build
+  before_script:
+    - docker image prune --force
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker image prune --force
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+
+# Apply unit test to the component
+unit_test nsc:
+  timeout: 15m
+  variables:
+    IMAGE_NAME: 'nsc' # name of the microservice
+    IMAGE_TAG: 'test' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build nsc
+  before_script:
+    # Do Docker cleanup
+    - docker ps --all --quiet | xargs --no-run-if-empty docker stop
+    - docker container prune --force
+    - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force
+    - docker image prune --force
+    - docker network prune --force
+    - docker volume prune --all --force
+    - docker buildx prune --force
+
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker run --name $IMAGE_NAME -d -p 8081:8081 -v "$PWD/src/tests:/opt/results" $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs $IMAGE_NAME
+    - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/report.xml"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    # Clean up
+    - docker ps --all --quiet | xargs --no-run-if-empty docker stop
+    - docker container prune --force
+    - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force
+    - docker network prune --force
+    - docker volume prune --all --force
+    - docker image prune --force
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+  artifacts:
+      when: always
+      reports:
+        junit: src/tests/report.xml
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..313b5e36cb8d52bb3586a6a57f597948227f5200
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,38 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+FROM python:3.12-slim
+
+# Establish working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update -qq && \
+    apt-get install -y -qq git python3-dev && \
+    apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Copy project content
+COPY . /app
+
+# Install python dependencies
+RUN pip install --upgrade pip && \
+    pip install -r requirements.txt
+
+# Expose port
+EXPOSE 8081
+
+# Init command
+ENTRYPOINT ["python3", "app.py"]
diff --git a/README.md b/README.md
index 6eda6c131858ced75e65fd492cf138755252adfe..c05f28cfeff08dd67c34bfa3df805c7f5dc744d1 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,58 @@
-# Network Slice Controller (NSC) Architecture
-
-The Network Slice Controller (NSC) is a component defined by the IETF to orchestrate the request, realization, and lifecycle control of network slices. It consists of two main modules: the mapper and the realizer. 
+# Network Slice Controller (NSC)
+
+The Network Slice Controller (NSC) is a component defined by the IETF to orchestrate the request, realization, and lifecycle control of IETF Network Slices.
+
+---
+
+## 📑 Table of Contents
+1. [Overview](#overview)  
+2. [Main Components](#main-components)  
+   - [NBI Processor](#nbi-processor)  
+   - [Mapper](#mapper)  
+   - [Realizer](#realizer)  
+   - [Slice Database](#slice-database)  
+3. [Workflow](#workflow)  
+4. [Architecture](#architecture)  
+5. [API](#api)  
+6. [WebUI](#webui)  
+7. [Requirements](#requirements)  
+8. [Configuration](#configuration)  
+   - [Logging](#logging)  
+   - [General](#general)  
+   - [Mapper](#mapper-1)  
+   - [Realizer](#realizer-1)  
+   - [Teraflow Configuration](#teraflow-configuration)  
+   - [Ixia Configuration](#ixia-configuration)  
+   - [WebUI](#webui-1)  
+9. [Usage](#usage)  
+10. [Available Branches and Releases](#available-branches-and-releases)
+
+---
 
 ## Overview
 
-The NSC handles end-to-end network slice requests originating from 5G customers. These requests are managed by the 5G end-to-end orchestrator, which configures RAN and Core Network elements accordingly and passes the request to the NSC for processing. The NSC then interacts with relevant network controllers to implement the network slice into the transport network.
+The NSC takes requests for IETF Network Slice Services and implements them using a suitable underlay technology. The NSC is the key component for control and management of the IETF Network Slices.  It provides the creation/modification/deletion, monitoring, and optimization of IETF Network Slices in a multi-domain, multi-technology, and multi-vendor environment.
+
+The main task of the NSC is to map abstract IETF Network Slice Service requirements to concrete technologies and establish required connectivity, ensuring that resources are allocated to slice as necessary.
+The IETF Network Slice Service Interface is used for communicating details of an IETF Network Slice Service (configuration, selected policies, operational state, etc.) as well as information about status and performance of the IETF Network Slice.  
+
+The NSC also handles end-to-end network slice requests originating from 5G customers. These requests are managed by the 5G end-to-end orchestrator, which configures RAN and Core Network elements accordingly and passes the request to the NSC for processing. The NSC then interacts with relevant network controllers to implement the network slice into the transport network.
+
+## Main Components
 
-## Main Modules
+### NBI Processor
+
+This component manages the requests entering the system. There are 4 kinds of requests:
+- Create: when this request arrives, the NBI processor checks the body of the request to analyze the format. If it is 3GPP NRM, it translates it into IETF Slice Service Request. If it is the latter, it propagates the request into the mapper. This function is able to process each request independently, even if several requests with different formats come together in the same request. Once created, the slice is stored in the slice database.
+- Delete: this request can be referring to a specific slice or to all slices configured. In any case, a deletion request is sent to the southbound controller and the referred slices are deleted from the slice database.
+- Get: this request can be referring to a specific slice or to all slices configured. In any case, the referred slices in the request are returned from the slice database.
+- Modify: this request allows changing the configuration of an existing slice. It works similarly to the create request, depending on the format present in the request, it is translated or not. Once processed, a modify request is sent to the controller and the referred slice is updated in the slice database.
+
+Details of how customers can make different requests are provided in the API section
 
 ### Mapper
 
-The mapper processes client network slice requests and correlates them with existing slices. When a slice request arrives, the mapper translates it by converting the request expressed in 3GPP NRM terms into the IETF NBI data model. This involves identifying the service demarcation points (SDPs) that define the connectivity in the transport network. Once these parameters are identified and mapped into the data model, the next step is to check the feasibility of implementing the slice request.
+As defined by the IETF, the mapper receives slice service requests from customers and processes them to obtain an overall view of how each new request complements the rest of the slices.
 
 Realizing a slice requires an existing network resource partition (NRP) with the specified slice requirements, which may not be available at the time of the request. This information will be retrieved from an external module, which is beyond the scope of this definition. This module will provide a response regarding the feasibility of realizing the slice.
 
@@ -18,80 +60,132 @@ If there are no available NRPs for instantiating the slice, the mapper will requ
 
 ### Realizer
 
-The realizer module determines the realization of each slice by interacting with specific network controllers. This version is currently working with Teraflow SDN controller. It receives requests from the mapper and decides on the technologies to be used to instantiate the slice based on the selected NRP associated with the slice. For example, Layer 2 VPN is the technology employed to realize network slices in this version. To achieve this, the realizer generates a request for the network controller to establish a Layer 2 VPN between two SDPs with the requirements specified in the slice request.
+The realizer implements slices by interacting with specific network controllers. It receives requests from the mapper and should have the intelligence to select the most adequate realizing technology to realize the slice. As it currently does not have that intelligence, it uses a fixed technology or one that the customer selects.
 
-## Workflow
+The NSC operates over the TeraflowSDN controller and the IXIA NE II device.
 
-1. **Request Initiation**: Network slice request originates from a 5G customer and is managed by the 5G end-to-end orchestrator.
+[TeraflowSDN](https://labs.etsi.org/rep/tfs/controller) is an open-source cloud native SDN controller enabling smart connectivity services for future networks beyond 5G. It allows establishing different services into a connected topology. The services currently supported are layer 2 VPNs and layer 3 VPNs. Therefore, the NSC has two specific functions that will be called “Realizing modules” that translate the IETF Slice Service Request into a request for deploying a VPN in TeraflowSDN: ``tfs_l2vpn`` and ``tfs_l3vpn``. TFS accepts two options to deploy its services: uploading a service via its WebUI using a proprietary descriptor, or using a standardized interface via its NBI based on the L2SM and L3SM IETF YANG Models. The second option is the preferred one, as it follows a standardized approach, although both options are supported.
 
-2. **Mapper Processing**: Converts the request into the IETF NBI data model, identifies SDPs, and checks feasibility.
+The IXIA NEII is a device that allows emulating network impairments. In this context, it is used to simplify configurations in the data plane and offer channel characteristics as a proof of concept while focusing on the specific configurations on the control plane. The characteristics that can be emulated over a channel are: IPs, VLAN, bandwidth, latency, delay variance (which is requested as tolerance), packet disorder (which is requested as reliability). When the realizer receives a request, it translates it into a proprietary template with the specified characteristics to be consumed by the device API.
 
-3. **Realizer Action**: Determines technology (e.g., Layer 2 VPN) and interacts with network controllers to instantiate the slice.
+### Slice Database
+
+The slice database is updated after each request by adding, removing or updating the stored slices. It contains two fields: 
+- ``slice_id``: it stores a unique identifier for the slice, serving as primary key, and is mapped from the id value present in the IETF Slice Service Model
+- ``intent``: it stores an object with the whole IETF Slice Service Model, that contains the characteristics and endpoints of the slice
+
+
+## Workflow
+
+1.	A request comes into the NSC NBI
+2.	If it is a GET request, the slice database is consulted. If it is a POST request, the NBI processor inspects the body of the request
+3.	If it is in 3GPP format, it is translated to IETF Slice Service Request. If not, it is sent directly to the mapper
+4.	The mapper processes the request and interacts with the planner when activated
+5.	The planner processes the request, populates it with the optimal path and returns it to the mapper
+6.	The mapper sends the request to the realizer, which selects a realization technology
+7.	The realization module translates the request to the controller specific configuration
+8.	The realizer sends the request to the controller and updates the database with the new slice
 
-4. **Implementation**: Network controllers configure the transport network as per the slice requirements.
 
 ## Arquitecture
 
  +## API
+
+The API has two namespaces: tfs and ixia, one dedicated to each controller, with the operations POST, GET, PUT and DELETE
+- `GET /{namespace}/slice`: returns a list with all transport network slices currently available in the controller.
+- `POST /{namespace}/slice`: allows the submission of a new network slice request
+- `DELETE /{namespace}/slice`: deletes all transport network slices stored in the controller.
+- `GET /{namespace}/slice/{slice_id}`: retrieves detailed information about a specific transport network slice identified by its slice_id 
+- `DELETE /{namespace}/slice/{slice_id}`: deletes a specific transport network slice identified by its slice_id
+- `PUT /{namespace}/slice/{slice_id}`: modifies a specific transport network slice identified by its slice_id
+
+The API is available in the swagger documentation panel at `{ip}:{NSC_PORT}/nsc`
+
+## WebUI
+
+The WebUI is a graphical interface that allows operating the NSC. Currently, it has more limited operations than the API. It supports the creation of slices in both Teraflow and IXIA controllers, as well as getting information of the current slices. Modification and deletion are not yet supported.
+
+It is accessed at `{ip}:{NSC_PORT}/webui`
+
 ## Requirements
 - Python3.12
 - python3-pip
 - python3-venv
 
-## Configuration Constants
+## Configuration
 
-In the main configuration file, several constants can be adjusted to customize the Network Slice Controller (NSC) behavior:
+In the `src/config/.env.example` file, several constants can be adjusted to customize the Network Slice Controller (NSC) behaviour:
 
 ### Logging
 - `DEFAULT_LOGGING_LEVEL`: Sets logging verbosity
-  - Default: `logging.INFO`
-  - Options: `logging.DEBUG`, `logging.INFO`, `logging.WARNING`, `logging.ERROR`
-
-### Server
-- `NSC_PORT`: Server port
-  - Default: `8081`
-
-### Paths
-- `SRC_PATH`: Absolute path to source directory
-- `TEMPLATES_PATH`: Path to templates directory
+  - Default: `INFO`
+  - Options: `DEBUG`, `INFO`, `WARNING`, `ERROR`, `NOTSET`, `CRITICAL`
+
+### General
+- `DUMP_TEMPLATES`: Flag to deploy templates for debugging
+  - Default: `false`
+
+## Mapper
+- `NRP_ENABLED`: Flag to determine if the NSC performs NRPs
+  - Default: `false`
+- `PLANNER_ENABLED`: Flag to activate the planner
+  - Default: `false`
+- `PCE_EXTERNAL`: Flag to determine if external PCE is used
+  - Default: `false`
+- `PLANNER_TYPE`:  Type of planner to be used
+  - Default: `ENERGY`
+  - Options: `ENERGY`, `HRAT`, `TFS_OPTICAL`
+- `HRAT_IP`: HRAT planner IP
+  - Default: `10.0.0.1`
+- `OPTICAL_PLANNER_IP`: Optical planner IP
+  - Default: `10.0.0.1`
+
+## Realizer
+- `DUMMY_MODE`: If true, no config sent to controllers
+  - Default: `true`
 
 ### Teraflow Configuration
-- `TFS_UPLOAD`: Enable/disable uploading slice service to Teraflow
-  - Default: `False`
+- `UPLOAD_TYPE`: Configure type of upload to Teraflow
+  - Default: `WEBUI`
+  - Options: `WEBUI`, `NBI`
 - `TFS_IP`: Teraflow SDN controller IP
-  - Default: `"192.168.165.10"`
+  - Default: `"127.0.0.1"`
 - `TFS_L2VPN_SUPPORT`: Enable additional L2VPN configuration support
   - Default: `False`
 
-## Usage
+### Ixia Configuration
+- `IXIA_IP`: Ixia NEII IP
+  - Default: `"127.0.0.1"`
 
-To deploy and execute the NSC, follow these steps:
+### WebUI
+- `WEBUI_DEPLOY`: Flag to deploy WebUI
+  - Default: `False`
 
-0. **Preparation**
-    ```
-    git clone https://labs.etsi.org/rep/tfs/nsc.git
-    cd nsc
-    python3 -m venv venv
-    source venv/bin/activate
-    pip install -r requirements.txt
-    ```
+## Usage
+
+To use the NSC, just build the image and run it in a container following these steps:
 
-1. **Start NSC Server**:
+1. **Deploy**
     ```
-    python3 app.py
+    ./deploy.sh
     ```
 
-2. **Generate Slice Requests**:
+2. **Send Slice Requests**:
 
-    To send slice request, the NSC accepts POST request at the endpoint /slice. It is available in the swagger documentation panel at {ip}:{NSC_PORT}/nsc
+    Send slice requests via **API** (/nsc) or **WebUI** (/webui)
 
-
+- The branches `release/X.Y.Z`, point to the code for the different release versions indicated in the branch name.
+  - Code in these branches can be considered stable, and no new features are planned.
+  - In case of bugs, point releases increasing revision number (Z) might be created.
 
+- The `develop` ([](https://labs.etsi.org/rep/tfs/nsc/-/commits/develop) [](https://labs.etsi.org/rep/tfs/nsc/-/commits/develop)) branch is the main development branch and contains the latest contributions.
+  - **Use it with care! It might not be stable.**
+  - The latest developments and contributions are added to this branch for testing and validation before reaching a release.
diff --git a/app.py b/app.py
index 61503b3b4600483708559bac25bee7cb4588a2a6..c7c0695591b935ba00b611ff9828d398118a2514 100644
--- a/app.py
+++ b/app.py
@@ -14,36 +14,51 @@
 
 # This file is an original contribution from Telefonica Innovación Digital S.L.
 
-import os
+import logging
 from flask import Flask
 from flask_restx import Api
 from flask_cors import CORS
 from swagger.tfs_namespace import tfs_ns
 from swagger.ixia_namespace import ixia_ns
-from src.Constants import NSC_PORT, WEBUI_DEPLOY
+from swagger.E2E_namespace import e2e_ns
+from src.config.constants import NSC_PORT
 from src.webui.gui import gui_bp
+from src.config.config import create_config
+from src.database.db import init_db
 
-app = Flask(__name__)
-CORS(app)
+def create_app():
+    """Create Flask application with configured API and namespaces."""
+    init_db()
+    app = Flask(__name__)
+    app = create_config(app)
+    CORS(app)
 
-# Create API instance
-api = Api(
-    app,
-    version="1.0",
-    title="Network Slice Controller (NSC) API",
-    description="API for orchestrating and realizing transport network slice requests",
-    doc="/nsc"  # Swagger UI URL
-)
+    # Configure logging to provide clear and informative log messages
+    logging.basicConfig(
+        level=app.config["LOGGING_LEVEL"],
+        format="%(levelname)s - %(message)s"
+    )
 
-# Register namespaces
-api.add_namespace(tfs_ns, path="/tfs")
-api.add_namespace(ixia_ns, path="/ixia")
-#gui_bp = Blueprint('gui', __name__, template_folder='templates')
+    # Create API instance
+    api = Api(
+        app,
+        version="1.0",
+        title="Network Slice Controller (NSC) API",
+        description="API for orchestrating and realizing transport network slice requests",
+        doc="/nsc"  # Swagger UI URL
+    )
 
-if WEBUI_DEPLOY:
-    app.secret_key = 'clave-secreta-dev' 
-    app.register_blueprint(gui_bp)
+    # Register namespaces
+    api.add_namespace(tfs_ns, path="/tfs")
+    api.add_namespace(ixia_ns, path="/ixia")
+    api.add_namespace(e2e_ns, path="/e2e")
 
+    if app.config["WEBUI_DEPLOY"]:
+        app.secret_key = "clave-secreta-dev"
+        app.register_blueprint(gui_bp)
+
+    return app
 
 if __name__ == "__main__":
+    app = create_app()
     app.run(host="0.0.0.0", port=NSC_PORT, debug=True)
diff --git a/deploy.sh b/deploy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8bd99bfca93f7a6126fec16290a73a8eab25fd78
--- /dev/null
+++ b/deploy.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+# Container name
+CONTAINER_NAME=nsc
+
+# Verify if docker is active
+if ! docker info > /dev/null 2>&1; then
+    echo "Error: Docker not running. Please, restart Docker service and try again."
+    exit 1
+fi
+
+# Stop container if running
+echo "Verify if '$CONTAINER_NAME' is running..."
+if [ $(docker ps -q -f name=$CONTAINER_NAME) ]; then
+    echo "Stopping current container '$CONTAINER_NAME'..."
+    docker stop $CONTAINER_NAME
+fi
+
+# Cleaning residual containers and images
+echo "Cleaning old Docker containers and images..."
+docker container prune -f
+docker image prune -f
+
+# Verify that .env.example exists
+if [ ! -f src/config/.env.example ]; then
+    echo "Error: .env.example not found"
+    exit 1
+fi
+
+# Copy .env.example to .env
+echo "Generating .env file..."
+cp src/config/.env.example .env
+
+# Read NSC_PORT from .env
+NSC_PORT=$(grep '^NSC_PORT=' .env | cut -d '=' -f2)
+
+# Docker build
+echo "Building docker image..."
+docker build -t nsc .
+
+# Executing nsc
+echo "Running nsc on port $NSC_PORT..."
+docker run -d --env-file .env -p $NSC_PORT:$NSC_PORT --name $CONTAINER_NAME $CONTAINER_NAME
+echo "---READY---"
diff --git a/images/NSC_Architecture.png b/images/NSC_Architecture.png
index 852437d55f3fadcb9c6a4303be7c70a264977e30..7abb89ba0da61ad538251f5537a7731e44b2319a 100644
Binary files a/images/NSC_Architecture.png and b/images/NSC_Architecture.png differ
diff --git a/requirements.txt b/requirements.txt
index e01b5584ed4de910f98767f741c470b1c05637d5..6e8674fea7327e70902540cd2b1847086d5d1176 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,25 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
 Flask
 flask-cors
 flask-restx
 netmiko
 requests
+pandas
+dotenv
+coverage
+pytest
diff --git a/scripts/show_logs_nsc.sh b/scripts/show_logs_nsc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..53a02fc5e5ee16c526ea5753501bba3e43688d05
--- /dev/null
+++ b/scripts/show_logs_nsc.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+docker logs nsc
\ No newline at end of file
diff --git a/src/Constants.py b/src/Constants.py
deleted file mode 100644
index 3b02ffd287c6eced608c00b993a71605fe53d0d4..0000000000000000000000000000000000000000
--- a/src/Constants.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-#     http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file includes original contributions from Telefonica Innovación Digital S.L.
-
-import logging, os, json
-
-# Default logging level
-DEFAULT_LOGGING_LEVEL = logging.INFO
-
-# Default port for NSC deployment
-NSC_PORT = 8081
-
-# Paths
-# Obtain the absolute path of the current file
-SRC_PATH = os.path.dirname(os.path.abspath(__file__))
-with open(os.path.join(SRC_PATH, 'IPs.json')) as f:
-    ips = json.load(f)
-
-# Create the path to the desired file relative to the current file
-TEMPLATES_PATH = os.path.join(SRC_PATH, "templates")
-
-# Dump templates
-DUMP_TEMPLATES = False
-
-# Mapper 
-
-# Flag to determine if the NSC performs NRPs
-NRP_ENABLED = False
-# Planner Flags
-PLANNER_ENABLED = True
-# Flag to determine if external PCE is used
-PCE_EXTERNAL = False
-
-# Realizer 
-
-# Controller Flags
-# If True, config is not sent to controllers
-DUMMY_MODE = False
-
-#####TERAFLOW#####
-# Teraflow IP
-TFS_IP = ips.get('TFS_IP')
-UPLOAD_TYPE = "WEBUI"  # "WEBUI" or "NBI"
-NBI_L2_PATH = "restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services"
-NBI_L3_PATH = "restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services"
-# Flag to determine if additional L2VPN configuration support is required for deploying L2VPNs with path selection
-TFS_L2VPN_SUPPORT = False
-
-#####IXIA#####
-# IXIA NEII IP
-IXIA_IP = ips.get('IXIA_IP')
-
-# WebUI
-
-# Flag to deploy the WebUI
-WEBUI_DEPLOY = True
\ No newline at end of file
diff --git a/src/api/main.py b/src/api/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cf60b81ebebba19ee02f8599db94644f91a6e98
--- /dev/null
+++ b/src/api/main.py
@@ -0,0 +1,216 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from src.utils.send_response import send_response
+import logging
+from flask import current_app
+from src.database.db import get_data, delete_data, get_all_data, delete_all_data
+from src.realizer.tfs.helpers.tfs_connector import tfs_connector
+from src.utils.safe_get import safe_get
+
+class Api:
+    def __init__(self, slice_service):
+        self.slice_service = slice_service
+    
+    def add_flow(self, intent):
+        """
+        Create a new transport network slice.
+
+        Args:
+            intent (dict): Network slice intent in 3GPP or IETF format
+
+        Returns:
+            Result of the Network Slice Controller (NSC) operation
+
+        API Endpoint:
+            POST /slice
+
+        Raises:
+            RuntimeError: If there is no content to process
+            Exception: For unexpected errors
+        """
+        try:
+            result = self.slice_service.nsc(intent)
+            if not result:
+                return send_response(False, code=404, message="No intents found")
+            logging.info(f"Slice created successfully")
+            return send_response(
+                True,
+                code=201,
+                data=result 
+            )
+        except RuntimeError as e:
+            # Handle case where there is no content to process
+            return send_response(False, code=200, message=str(e))
+        except Exception as e:
+            # Handle unexpected errors
+            return send_response(False, code=500, message=str(e))
+    
+    def get_flows(self,slice_id=None):
+        """
+        Retrieve transport network slice information.
+
+        This method allows retrieving:
+        - All transport network slices
+        - A specific slice by its ID
+
+        Args:
+            slice_id (str, optional): Unique identifier of a specific slice. 
+                                      Defaults to None.
+
+        Returns:
+            dict or list: 
+            - If slice_id is provided: Returns the specific slice details
+            - If slice_id is None: Returns a list of all slices
+            - Returns an error response if no slices are found
+
+        API Endpoint:
+            GET /slice/{id}
+
+        Raises:
+            ValueError: If no transport network slices are found
+            Exception: For unexpected errors
+        """
+        try:
+            # Read slice database from JSON file
+            content = get_all_data()
+            # If specific slice ID is provided, find and return matching slice
+            if slice_id:
+                for slice in content:
+                    if slice["slice_id"] == slice_id and slice.get("controller") == self.slice_service.controller_type:
+                        return slice, 200
+                raise ValueError("Transport network slices not found")
+            # If no slices exist, raise an error
+            if len(content) == 0:
+                raise ValueError("Transport network slices not found")
+            
+            # Return all slices if no specific ID is given
+            return [slice for slice in content if slice.get("controller") == self.slice_service.controller_type], 200
+        
+        except ValueError as e:
+            # Handle case where no slices are found
+            return send_response(False, code=404, message=str(e))
+        except Exception as e:
+            # Handle unexpected errors
+            return send_response(False, code=500, message=str(e))
+    
+    def modify_flow(self,slice_id, intent):
+        """
+        Modify an existing transport network slice.
+
+        Args:
+            slice_id (str): Unique identifier of the slice to modify
+            intent (dict): New intent configuration for the slice
+
+        Returns:
+            Result of the Network Slice Controller (NSC) operation
+
+        API Endpoint:
+            PUT /slice/{id}
+        Raises:
+            Exception: For unexpected errors
+        """
+        try:
+            result = self.slice_service.nsc(intent, slice_id)
+            if not result:
+                return send_response(False, code=404, message="Slice not found")
+            logging.info(f"Slice {slice_id} modified successfully")
+            return send_response(
+                True,
+                code=200,
+                message="Slice modified successfully",
+                data=result
+            )
+        except ValueError as e:
+            # Handle case where no slices are found
+            return send_response(False, code=404, message=str(e))
+        except Exception as e:
+            # Handle unexpected errors
+            return send_response(False, code=500, message=str(e))
+    
+    def delete_flows(self, slice_id=None):
+        """
+        Delete transport network slice(s).
+
+        This method supports:
+        - Deleting a specific slice by ID
+        - Deleting all slices
+        - Optional cleanup of L2VPN configurations
+
+        Args:
+            slice_id (str, optional): Unique identifier of slice to delete. 
+                                      Defaults to None.
+
+        Returns:
+            dict: {} indicating successful deletion or error details
+
+        API Endpoint:
+            DELETE /slice/{id}
+
+        Raises:
+            ValueError: If no slices are found to delete
+            Exception: For unexpected errors
+
+        Notes:
+            - If controller_type is TFS, attempts to delete from Teraflow
+            - If need_l2vpn_support is True, performs additional L2VPN cleanup
+        """
+        try:
+            # Delete specific slice if slice_id is provided
+            if slice_id:
+                slice = get_data(slice_id)
+                # Raise error if slice not found
+                if not slice or slice.get("controller") != self.slice_service.controller_type:
+                    raise ValueError("Transport network slice not found")
+                # Delete in Teraflow
+                if not current_app.config["DUMMY_MODE"]:
+                    if self.slice_service.controller_type == "TFS":
+                        slice_type = safe_get(slice, ['intent', 'ietf-network-slice-service:network-slice-services', 'slice-service', 0, 'service-tags', 'tag-type', 0, 'tag-type-value', 0])
+                        if not slice_type:
+                            slice_type = "L2"
+                            logging.warning(f"Slice type not found in slice intent. Defaulting to L2")
+                        tfs_connector().nbi_delete(current_app.config["TFS_IP"],slice_type, slice_id)
+                # Update slice database
+                delete_data(slice_id)
+                logging.info(f"Slice {slice_id} removed successfully")
+                return {}, 204
+            
+            # Delete all slices
+            else:
+                # Optional: Delete in Teraflow if configured
+                if not current_app.config["DUMMY_MODE"]:
+                    if self.slice_service.controller_type == "TFS":
+                        content = get_all_data()
+                        for slice in content:
+                            if slice.get("controller") == self.slice_service.controller_type:
+                                slice_type = safe_get(slice, ['intent', 'ietf-network-slice-service:network-slice-services', 'slice-service', 0, 'service-tags', 'tag-type', 0, 'tag-type-value', 0])
+                                if not slice_type:
+                                    slice_type = "L2"
+                                    logging.warning(f"Slice type not found in slice intent. Defaulting to L2")
+                                tfs_connector().nbi_delete(current_app.config["TFS_IP"],slice_type, slice.get("slice_id"))
+                        if current_app.config["TFS_L2VPN_SUPPORT"]:
+                            self.slice_service.tfs_l2vpn_delete()
+
+                # Clear slice database
+                delete_all_data()
+
+                logging.info("All slices removed successfully")
+                return {}, 204
+        
+        except ValueError as e:
+            return send_response(False, code=404, message=str(e))
+        except Exception as e:
+            return send_response(False, code=500, message=str(e))
\ No newline at end of file
diff --git a/src/config/.env.example b/src/config/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..fb565b7443f1a3722aaab7a9b8fb7fc077cb027c
--- /dev/null
+++ b/src/config/.env.example
@@ -0,0 +1,69 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+# -------------------------
+# General
+# -------------------------
+NSC_PORT=8081
+# Options: CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+LOGGING_LEVEL=INFO
+DUMP_TEMPLATES=false
+
+# -------------------------
+# Mapper
+# -------------------------
+# Flag to determine if the NSC performs NRPs
+NRP_ENABLED=false
+# Planner Flags
+PLANNER_ENABLED=true
+# Flag to determine if external PCE is used
+PCE_EXTERNAL=false
+# Type of planner to be used. Options: ENERGY, HRAT, TFS_OPTICAL
+PLANNER_TYPE=ENERGY
+# HRAT
+HRAT_IP=10.0.0.1
+# TFS_OPTICAL
+OPTICAL_PLANNER_IP=10.0.0.1
+
+# -------------------------
+# Realizer
+# -------------------------
+# If true, no config sent to controllers
+DUMMY_MODE=true
+
+# -------------------------
+# Teraflow
+# -------------------------
+TFS_IP=127.0.0.1
+# Options: WEBUI or NBI
+UPLOAD_TYPE=WEBUI
+# Flag to determine if additional L2VPN configuration support is required for deploying L2VPNs with path selection
+TFS_L2VPN_SUPPORT=false
+
+# -------------------------
+# IXIA
+# -------------------------
+IXIA_IP=127.0.0.1
+
+# -------------------------
+# E2E Controller
+# -------------------------
+TFS_E2E_IP=127.0.0.1
+
+# -------------------------
+# WebUI
+# -------------------------
+WEBUI_DEPLOY=true
diff --git a/src/IPs.json b/src/config/IPs.json
similarity index 100%
rename from src/IPs.json
rename to src/config/IPs.json
diff --git a/src/config/config.py b/src/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..04b72aae571b67b8e490a01267c4af5aac4e12d9
--- /dev/null
+++ b/src/config/config.py
@@ -0,0 +1,67 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import os
+from dotenv import load_dotenv
+from flask import Flask
+import logging
+
+# Load .env file if present
+load_dotenv()
+
+LOG_LEVELS = {
+    "CRITICAL": logging.CRITICAL,
+    "ERROR": logging.ERROR,
+    "WARNING": logging.WARNING,
+    "INFO": logging.INFO,
+    "DEBUG": logging.DEBUG,
+    "NOTSET": logging.NOTSET,
+}
+
+def create_config(app: Flask):
+    """Load flags into Flask app.config"""
+    # Default logging level
+    app.config["LOGGING_LEVEL"] = LOG_LEVELS.get(os.getenv("LOGGING_LEVEL", "INFO").upper(),logging.INFO)
+
+    # Dump templates
+    app.config["DUMP_TEMPLATES"] = os.getenv("DUMP_TEMPLATES", "false").lower() == "true"
+
+    # Mapper
+    app.config["NRP_ENABLED"] = os.getenv("NRP_ENABLED", "false").lower() == "true"
+    app.config["PLANNER_ENABLED"] = os.getenv("PLANNER_ENABLED", "false").lower() == "true"
+    app.config["PLANNER_TYPE"] = os.getenv("PLANNER_TYPE", "ENERGY")
+    app.config["PCE_EXTERNAL"] = os.getenv("PCE_EXTERNAL", "false").lower() == "true"
+    app.config["HRAT_IP"] = os.getenv("HRAT_IP", "192.168.1.143")
+    app.config["OPTICAL_PLANNER_IP"] = os.getenv("OPTICAL_PLANNER_IP", "10.30.7.66")
+
+    # Realizer
+    app.config["DUMMY_MODE"] = os.getenv("DUMMY_MODE", "true").lower() == "true"
+
+    # Teraflow
+    app.config["TFS_IP"] = os.getenv("TFS_IP", "127.0.0.1")
+    app.config["UPLOAD_TYPE"] = os.getenv("UPLOAD_TYPE", "WEBUI")
+    app.config["TFS_L2VPN_SUPPORT"] = os.getenv("TFS_L2VPN_SUPPORT", "false").lower() == "true"
+
+    # IXIA
+    app.config["IXIA_IP"] = os.getenv("IXIA_IP", "127.0.0.1")
+
+    # E2E Controller
+    app.config["TFS_E2E_IP"] = os.getenv("TFS_E2E_IP", "127.0.0.1")
+
+    # WebUI
+    app.config["WEBUI_DEPLOY"] = os.getenv("WEBUI_DEPLOY", "false").lower() == "true"
+
+    return app
diff --git a/src/config/constants.py b/src/config/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3d6b8783c5b610357ab07e9a566d9c71b74afb1
--- /dev/null
+++ b/src/config/constants.py
@@ -0,0 +1,33 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+from pathlib import Path
+import os
+
+# Default port for NSC deployment
+NSC_PORT = os.getenv("NSC_PORT", "8081")
+
+# Paths
+BASE_DIR = Path(__file__).resolve().parent.parent.parent
+SRC_PATH = BASE_DIR / "src"
+TEMPLATES_PATH = SRC_PATH / "templates"
+DATABASE_PATH = SRC_PATH / "database"
+CONFIG_PATH = SRC_PATH / "config"
+NBI_L2_PATH = "restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services"
+NBI_L3_PATH = "restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services"
+
+
+
diff --git a/src/database/db.py b/src/database/db.py
new file mode 100644
index 0000000000000000000000000000000000000000..341d79eb9e4f4f33487067b2890b0621872ef67c
--- /dev/null
+++ b/src/database/db.py
@@ -0,0 +1,195 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sqlite3, json, logging
+
+# Database file
+DB_NAME = "slice.db"
+
+# Initialize database and create table
+def init_db():
+    """
+    Initialize the SQLite database and create the slice table if not exists.
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("""
+        CREATE TABLE IF NOT EXISTS slice (
+            slice_id TEXT PRIMARY KEY,
+            intent TEXT NOT NULL,
+            controller TEXT NOT NULL
+        )
+    """)
+    conn.commit()
+    conn.close()
+
+# Save data to the database
+def save_data(slice_id: str, intent_dict: dict, controller: str):
+    """
+    Save a new slice entry to the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice
+        intent_dict (dict): Intent data
+        controller (str): Controller type
+    
+    Raises:
+        ValueError: If a slice with the given slice_id already exists
+    """
+    intent_str = json.dumps(intent_dict)
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    try:
+        cursor.execute("INSERT INTO slice (slice_id, intent, controller) VALUES (?, ?, ?)", (slice_id, intent_str, controller))
+        conn.commit()
+    # Handle duplicate slice ID
+    except sqlite3.IntegrityError:
+        raise ValueError(f"Slice with id '{slice_id}' already exists.")
+    finally:
+        conn.close()
+
+# Update data in the database
+def update_data(slice_id: str, new_intent_dict: dict, controller: str):
+    """
+    Update an existing slice entry in the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice
+        new_intent_dict (dict): New intent data
+        controller (str): Controller type
+    
+    Raises:
+        ValueError: If no slice is found with the given slice_id
+    """
+    intent_str = json.dumps(new_intent_dict)
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("UPDATE slice SET intent = ?, controller = ? WHERE slice_id = ?", (intent_str, controller, slice_id))
+    if cursor.rowcount == 0:
+        raise ValueError(f"No slice found with id '{slice_id}' to update.")
+    else:
+        logging.debug(f"Slice '{slice_id}' updated.")
+    conn.commit()
+    conn.close()
+
+# Delete data from the database
+def delete_data(slice_id: str):
+    """
+    Delete a slice entry from the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice to delete
+    
+    Raises:
+        ValueError: If no slice is found with the given slice_id
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("DELETE FROM slice WHERE slice_id = ?", (slice_id,))
+    if cursor.rowcount == 0:
+        raise ValueError(f"No slice found with id '{slice_id}' to delete.")
+    else:
+        logging.debug(f"Slice '{slice_id}' deleted.")
+    conn.commit()
+    conn.close()
+
+# Get data from the database
+def get_data(slice_id: str) -> dict:
+    """
+    Retrieve a specific slice entry from the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice to retrieve
+    
+    Returns:
+        dict: Slice data including slice_id, intent (as dict), and controller
+    
+    Raises:
+        ValueError: If no slice is found with the given slice_id
+        Exception: For JSON decoding errors
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("SELECT * FROM slice WHERE slice_id = ?", (slice_id,))
+    row = cursor.fetchone()
+    conn.close()
+
+    if row:
+        column_names = [description[0] for description in cursor.description]
+        result = dict(zip(column_names, row))
+        if isinstance(result.get("intent"), str):
+            try:
+                result["intent"] = json.loads(result["intent"])
+            except json.JSONDecodeError:
+                raise Exception("Warning: 'intent' is not a valid JSON string.")
+        return result
+
+    else:
+        raise ValueError(f"No slice found with id '{slice_id}'.")
+
+# Get all slices
+def get_all_data() -> list[dict]:
+    """
+    Retrieve all slice entries from the database.
+
+    Returns:
+        list: List of slice data dictionaries including slice_id, intent (as dict), and controller
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("SELECT * FROM slice")
+    rows = cursor.fetchall()
+    conn.close()
+    return [
+        {
+        "slice_id": row[0],
+        "intent": json.loads(row[1]),
+        "controller": row[2] 
+        }
+    for row in rows
+    ]
+
+def delete_all_data():
+    """
+    Delete all slice entries from the database.
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("DELETE FROM slice")
+    conn.commit()
+    conn.close()
+    logging.debug("All slice data deleted.")
+
+# Example usage
+if __name__ == "__main__":
+    init_db()
+
+    # Save a slice
+    test_intent = {"bandwidth": "1Gbps", "latency": "10ms", "provider": "opensec"}
+    save_data("slice-001", test_intent, "TFS")
+
+    # Get the slice
+    result = get_data("slice-001")
+    if result:
+        print(f"Retrieved intent for slice-001: {result}")
+
+    # Update the slice
+    updated_intent = {"bandwidth": "2Gbps", "latency": "5ms", "provider": "opensec"}
+    update_data("slice-001", updated_intent, "TFS")
+
+    # Delete the slice
+    delete_data("slice-001")
+
+    get_all_data()
+    delete_all_data()
diff --git a/src/nrp_ddbb.json b/src/database/nrp_ddbb.json
similarity index 91%
rename from src/nrp_ddbb.json
rename to src/database/nrp_ddbb.json
index 948967ef9fd1a9389ac634b19255857a5e13d3aa..1616438516aabb21393b339ee45bf7dc637803c2 100644
--- a/src/nrp_ddbb.json
+++ b/src/database/nrp_ddbb.json
@@ -6,12 +6,12 @@
             {
               "metric-type": "one-way-bandwidth",
               "metric-unit": "kbps",
-              "bound": 1
+              "bound": 100000000000
             },
             {
               "metric-type": "one-way-delay-maximum",
               "metric-unit": "milliseconds",
-              "bound": 800
+              "bound": 1
             }
           ],
         "slices": ["slice-service-02873501-bf0a-4b02-8540-2f9d970ea20f", "slice-service-e3b22fa8-f3da-4da8-881b-c66e5161b4a5"],
@@ -24,12 +24,12 @@
           {
             "metric-type": "one-way-bandwidth",
             "metric-unit": "kbps",
-            "bound": 1
+            "bound": 10000000000000
           },
           {
             "metric-type": "one-way-delay-maximum",
             "metric-unit": "milliseconds",
-            "bound": 800
+            "bound": 2
           }
         ],
       "slices": ["slice-service-02873501-bf0a-4b02-8540-2f9d970ea20f", "slice-service-e3b22fa8-f3da-4da8-881b-c66e5161b4a5"],
diff --git a/src/database/store_data.py b/src/database/store_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fccbfb6c61878ac5658b9c8570332dbab263075
--- /dev/null
+++ b/src/database/store_data.py
@@ -0,0 +1,38 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from src.database.db import save_data, update_data
+
+def store_data(intent, slice_id, controller_type=None):
+    """
+    Store network slice intent information in the SQLite slice database.
+
+    This method:
+    1. Updates the existing database entry when slice_id is given
+    2. Otherwise extracts the slice id from the IETF intent
+    3. Saves the new slice entry to the database
+
+    Args:
+        intent (dict): Network slice intent to be stored
+        slice_id (str, optional): Existing slice ID to update. Defaults to None.
+    """
+    # Update or add new slice intent
+    if slice_id:
+        update_data(slice_id, intent, controller_type)
+    else:
+        # Add new slice intent
+        slice_id = intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        save_data(slice_id, intent, controller_type)
\ No newline at end of file
diff --git a/src/main.py b/src/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ef517d17df077bbfdbc9992ac0a90c3820dfdf2
--- /dev/null
+++ b/src/main.py
@@ -0,0 +1,129 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging
+import time
+from src.utils.dump_templates import dump_templates
+from src.utils.build_response import build_response
+from src.nbi_processor.main import nbi_processor
+from src.database.store_data import store_data
+from src.mapper.main import mapper
+from src.realizer.main import realizer
+from src.realizer.send_controller import send_controller
+
+class NSController:
+    """
+    Network Slice Controller (NSC) - A class to manage network slice creation,
+    modification, and deletion across different network domains.
+
+    This controller handles the translation, mapping, and realization of network
+    slice intents from different formats (3GPP and IETF) to network-specific
+    configurations.
+
+    Key Functionalities:
+    - Intent Processing: Translate and process network slice intents
+    - Slice Management: Create, modify, and delete network slices
+    - NRP (Network Resource Partition) Mapping: Match slice requirements with available resources
+    - Slice Realization: Convert intents to specific network configurations (L2VPN, L3VPN)
+    """
+
+    def __init__(self, controller_type = "TFS"):
+        """
+        Initialize the Network Slice Controller.
+
+        Args:
+            controller_type (str): Flag to determine if configurations
+                should be uploaded to Teraflow or IXIA system.
+
+        Attributes:
+            controller_type (str): Flag for Teraflow or Ixia upload
+            response (dict): Stores slice creation responses
+            start_time (float): Tracks slice setup start time
+            end_time (float): Tracks slice setup end time
+            setup_time (float): Total time taken for slice setup in milliseconds
+        """
+        self.controller_type = controller_type
+
+        self.path = ""
+        self.response = []
+        self.start_time = 0
+        self.end_time = 0
+        self.setup_time = 0
+
+    def nsc(self, intent_json, slice_id=None):
+        """
+        Main Network Slice Controller method to process and realize network slice intents.
+
+        Workflow:
+        1. Load IETF template
+        2. Process intent (detect format, translate if needed)
+        3. Extract slice data
+        4. Store slice information
+        5. Map slice to Network Resource Partition (NRP)
+        6. Realize slice configuration
+        7. Send configuration to network controllers
+
+        Args:
+            intent_json (dict): Network slice intent in 3GPP or IETF format
+            slice_id (str, optional): Existing slice identifier for modification
+
+        Returns:
+            dict: Contains slice creation responses and setup time in milliseconds
+
+        """
+        # Start performance tracking
+        self.start_time = time.perf_counter()
+
+        # Reset requests
+        requests = {"services":[]}
+        response = None
+
+        # Process intent (translate if 3GPP)
+        ietf_intents = nbi_processor(intent_json)
+
+        for intent in ietf_intents:
+            # Mapper
+            rules = mapper(intent)
+            # Build response
+            self.response = build_response(intent, self.response, controller_type= self.controller_type)
+            # Realizer
+            request = realizer(intent, controller_type=self.controller_type, response = self.response, rules = rules)
+            # Store slice request details
+            if request: 
+                requests["services"].append(request)
+                store_data(intent, slice_id, controller_type=self.controller_type)
+
+        # Store the generated template for debugging
+        dump_templates(intent_json, ietf_intents, requests)
+
+        # Check if there are services to process
+        if not requests.get("services"):
+            raise RuntimeError("No service to process.")
+
+        # Send config to controllers
+        response = send_controller(self.controller_type, requests)
+
+        if not response:
+            raise Exception("Controller upload failed")
+
+        # End performance tracking
+        self.end_time = time.perf_counter()
+        setup_time = (self.end_time - self.start_time) * 1000
+
+        return {
+            "slices": self.response,
+            "setup_time": setup_time
+        }
\ No newline at end of file
diff --git a/src/mapper/main.py b/src/mapper/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e0a4c7aefbd535b692d30c24a925ffee74b9b9d
--- /dev/null
+++ b/src/mapper/main.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from src.planner.planner import Planner
+from .slo_viability import slo_viability
+from src.realizer.main import realizer
+from flask import current_app
+
def mapper(ietf_intent):
    """
    Map an IETF network slice intent to the most suitable Network Resource Partition (NRP).

    This method:
    1. If NRP is enabled, retrieves the current NRP view
    2. Extracts Service Level Objectives (SLOs) from the intent
    3. Finds NRPs that can meet the SLO requirements
    4. Selects the best NRP based on viability and availability
    5. Attaches the slice to the selected NRP or creates a new one
    6. If planner is enabled, computes the optimal path for the slice

    Args:
        ietf_intent (dict): IETF-formatted network slice intent.

    Returns:
        dict or None: Optimal path if planner is enabled; otherwise, None.
    """
    if current_app.config["NRP_ENABLED"]:
        # Retrieve NRP view
        nrp_view = realizer(None, True, "READ")

        # Extract Service Level Objectives (SLOs) from the intent
        slos = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
        if slos:
            # Find candidate NRPs that can meet the SLO requirements.
            # slo_viability returns a (viable, score) pair; evaluate it
            # once per NRP instead of twice.
            candidates = []
            for nrp in nrp_view:
                viable, score = slo_viability(slos, nrp)
                if viable and nrp["available"]:
                    candidates.append((nrp, score))
            logging.debug(f"Candidates: {candidates}")

            # Select the NRP with the highest flexibility score, if any
            best_nrp = max(candidates, key=lambda x: x[1])[0] if candidates else None
            logging.debug(f"Best NRP: {best_nrp}")

            if best_nrp:
                best_nrp["slices"].append(ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"])
                # Update NRP view
                realizer(ietf_intent, True, "UPDATE")
                # TODO Here we should put how the slice is attached to an already created nrp
            else:
                # Request the controller to create a new NRP that meets the SLOs
                answer = realizer(ietf_intent, True, "CREATE", best_nrp)
                if not answer:
                    logging.error("Slice rejected due to lack of NRPs")
                    return None
                # TODO Here we should put how the slice is attached to the new nrp

    if current_app.config["PLANNER_ENABLED"]:
        # Delegate path computation to the configured planner implementation
        optimal_path = Planner().planner(ietf_intent, current_app.config["PLANNER_TYPE"])
        logging.debug(f"Optimal path: {optimal_path}")
        return optimal_path
    return None
\ No newline at end of file
diff --git a/src/mapper/slo_viability.py b/src/mapper/slo_viability.py
new file mode 100644
index 0000000000000000000000000000000000000000..a91b9296e6d67c0270e232faea491bb7f7d1a914
--- /dev/null
+++ b/src/mapper/slo_viability.py
@@ -0,0 +1,64 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+
def slo_viability(slice_slos, nrp_slos):
    """
    Compare Service Level Objectives (SLOs) between a slice and a Network Resource Partition (NRP).

    This method assesses whether an NRP can satisfy the SLOs of a network slice.

    Args:
        slice_slos (list): Service Level Objectives of the slice, each a dict
            with "metric-type" and "bound" keys.
        nrp_slos (dict): Service Level Objectives of the Network Resource
            Partition, under the "slos" key.

    Returns:
        tuple: A boolean indicating viability and a flexibility score
            - First value: True if NRP meets SLOs, False otherwise
            - Second value: A score representing how well the NRP meets the SLOs
              (average relative slack over all matched, scorable metrics)
    """
    # Metric names whose bound is an upper limit (NRP must offer <= slice bound)
    # versus a lower limit (NRP must offer >= slice bound).
    slo_type = {
        "max": ["one-way-delay-maximum", "two-way-delay-maximum", "one-way-delay-percentile", "two-way-delay-percentile",
                "one-way-delay-variation-maximum", "two-way-delay-variation-maximum",
                "one-way-delay-variation-percentile", "two-way-delay-variation-percentile",
                "one-way-packet-loss", "two-way-packet-loss"],
        "min": ["one-way-bandwidth", "two-way-bandwidth", "shared-bandwidth"]
    }
    flexibility_scores = []
    for slo in slice_slos:
        for nrp_slo in nrp_slos['slos']:
            if slo["metric-type"] == nrp_slo["metric-type"]:
                flexibility = None
                # Handle maximum type SLOs: reject before computing the score so a
                # zero bound cannot raise ZeroDivisionError on a rejected NRP.
                if slo["metric-type"] in slo_type["max"]:
                    logging.debug(f"SLO: {slo}, NRP SLO: {nrp_slo}")
                    if slo["bound"] < nrp_slo["bound"]:
                        return False, 0  # Does not meet maximum constraint
                    flexibility = (slo["bound"] - nrp_slo["bound"]) / slo["bound"] if slo["bound"] else 0.0
                # Handle minimum type SLOs
                elif slo["metric-type"] in slo_type["min"]:
                    logging.debug(f"SLO: {slo}, NRP SLO: {nrp_slo}")
                    if slo["bound"] > nrp_slo["bound"]:
                        return False, 0  # Does not meet minimum constraint
                    flexibility = (nrp_slo["bound"] - slo["bound"]) / slo["bound"] if slo["bound"] else 0.0
                # A metric matched by name but outside both lists contributes no
                # score (previously this dereferenced an unassigned variable).
                if flexibility is not None:
                    flexibility_scores.append(flexibility)
                break  # Exit inner loop after finding matching metric

    # Calculate final viability score once, after all metrics are processed
    score = sum(flexibility_scores) / len(flexibility_scores) if flexibility_scores else 0
    return True, score  # All checks passed, so the NRP is viable
\ No newline at end of file
diff --git a/src/nbi_processor/detect_format.py b/src/nbi_processor/detect_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b48be3eb90ef412079caf81092758333aff4a03
--- /dev/null
+++ b/src/nbi_processor/detect_format.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
def detect_format(json_data):
    """
    Detect the format of the input network slice intent.

    This method identifies whether the input JSON is in 3GPP or IETF format
    by checking for specific keys in the JSON structure.

    Args:
        json_data (dict): Input network slice intent JSON

    Returns:
        str or None:
            - "IETF" if IETF-specific keys are found
            - "3GPP" if 3GPP-specific keys are found
            - None if no recognizable format is detected
    """
    # Marker keys that identify each supported intent format
    ietf_marker = "ietf-network-slice-service:network-slice-services"
    gpp_markers = ("NetworkSlice1", "TopSliceSubnet1", "CNSliceSubnet1", "RANSliceSubnet1")

    if ietf_marker in json_data:
        return "IETF"

    for marker in gpp_markers:
        if marker in json_data:
            return "3GPP"

    # No recognizable format detected
    return None
\ No newline at end of file
diff --git a/src/nbi_processor/main.py b/src/nbi_processor/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfa55ddfe27b0433a84fe69fcfc30d3b7e6fabf4
--- /dev/null
+++ b/src/nbi_processor/main.py
@@ -0,0 +1,56 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .detect_format import detect_format
+from .translator import translator
+
def nbi_processor(intent_json):
    """
    Process and translate network slice intents from different formats (3GPP or IETF).

    This method detects the input JSON format and converts 3GPP intents to IETF format.

    Args:
        intent_json (dict): Input network slice intent in either 3GPP or IETF format.

    Returns:
        list or None: A list of IETF-formatted network slice intents, or None
            when translation produced no intents (e.g. an empty subnet list).

    Raises:
        ValueError: If the JSON request format is not recognized.
    """
    # Detect the input JSON format (3GPP or IETF).
    # Renamed from "format" to avoid shadowing the builtin.
    detected_format = detect_format(intent_json)
    ietf_intents = []

    # TODO Needs to be generalized to support different names of slicesubnets
    # Process different input formats
    if detected_format == "3GPP":
        # Translate each subnet in 3GPP format to IETF format
        for subnet in intent_json["RANSliceSubnet1"]["networkSliceSubnetRef"]:
            ietf_intents.append(translator(intent_json, subnet))
        logging.info("3GPP requests translated to IETF template")
    elif detected_format == "IETF":
        # If already in IETF format, add directly
        logging.info("IETF intent received")
        ietf_intents.append(intent_json)
    else:
        # Handle unrecognized format
        logging.error("JSON request format not recognized")
        raise ValueError("JSON request format not recognized")

    return ietf_intents or None
\ No newline at end of file
diff --git a/src/nbi_processor/translator.py b/src/nbi_processor/translator.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f23d19ecca78b635d66be1c969f95808a3ebdff
--- /dev/null
+++ b/src/nbi_processor/translator.py
@@ -0,0 +1,107 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import uuid, os
+from src.utils.load_template import load_template
+from src.config.constants import TEMPLATES_PATH
+
def translator(gpp_intent, subnet):
    """
    Translate a 3GPP network slice intent to IETF format.

    This method converts a 3GPP intent into a standardized IETF intent template,
    mapping key parameters such as QoS profiles, service endpoints, and connection details.

    Notes:
    - Generates a unique slice service ID using UUID
    - Maps QoS requirements, source/destination endpoints
    - The first two "EpTransport" entries are used as source and destination SDPs

    Args:
        gpp_intent (dict): Original 3GPP network slice intent
        subnet (str): Specific subnet reference within the 3GPP intent

    Returns:
        dict: Translated IETF-formatted network slice intent
    """
    # Load an empty IETF template to populate
    ietf_i = load_template(os.path.join(TEMPLATES_PATH, "ietf_template_empty.json"))

    # Aliases into the template (mutating these mutates ietf_i)
    services_root = ietf_i["ietf-network-slice-service:network-slice-services"]
    slo_template = services_root["slo-sle-templates"]["slo-sle-template"][0]
    service = services_root["slice-service"][0]

    # Extract endpoint transport objects
    ep_transport_objects = gpp_intent[subnet]["EpTransport"]

    # Populate template with SLOs (currently supporting QoS profile, latency and bandwidth)
    slo_template["id"] = gpp_intent[ep_transport_objects[0]]["qosProfile"]

    profile = gpp_intent.get(subnet, {}).get("SliceProfileList", [{}])[0].get("RANSliceSubnetProfile", {})

    # Mapping from nested 3GPP profile key paths to IETF (metric-type, metric-unit)
    metrics = {
        ("uLThptPerSliceSubnet", "MaxThpt"): ("one-way-bandwidth", "kbps"),
        ("uLLatency",): ("one-way-delay-maximum", "milliseconds"),
        ("EnergyConsumption",): ("energy_consumption", "Joules"),
        ("EnergyEfficiency",): ("energy_efficiency", "W/bps"),
        ("CarbonEmissions",): ("carbon_emission", "gCO2eq"),
        ("RenewableEnergyUsage",): ("renewable_energy_usage", "rate")
    }

    def get_nested(d, keys):
        # Walk a dict along a key path; return None if any step is missing
        for k in keys:
            if isinstance(d, dict) and k in d:
                d = d[k]
            else:
                return None
        return d

    for key_path, (metric_type, metric_unit) in metrics.items():
        value = get_nested(profile, key_path)
        if value is not None:
            slo_template["slo-policy"]["metric-bound"].append({
                "metric-type": metric_type,
                "metric-unit": metric_unit,
                "bound": value
            })

    # Generate unique slice service ID and description
    service["id"] = f"slice-service-{uuid.uuid4()}"
    service["description"] = f"Transport network slice mapped with 3GPP slice {next(iter(gpp_intent))}"
    service["slo-sle-policy"]["slo-sle-template"] = slo_template["id"]

    # node-id is the token after the first space in the EpTransport object name;
    # the connection group id joins the source and destination node ids.
    node_ids = [ep.split(" ", 1)[1] for ep in ep_transport_objects[:2]]
    connection_group_id = f"{node_ids[0]}_{node_ids[1]}"

    # Configure Source (index 0) and Destination (index 1) SDPs identically
    for i, ep in enumerate(ep_transport_objects[:2]):
        ep_data = gpp_intent[ep]
        sdp = service["sdps"]["sdp"][i]
        sdp["node-id"] = node_ids[i]
        sdp["sdp-ip-address"] = gpp_intent[ep_data["EpApplicationRef"][0]]["localAddress"]
        match_criterion = sdp["service-match-criteria"]["match-criterion"][0]
        match_criterion["match-type"] = ep_data["logicalInterfaceInfo"]["logicalInterfaceType"]
        match_criterion["value"] = ep_data["logicalInterfaceInfo"]["logicalInterfaceId"]
        match_criterion["target-connection-group-id"] = connection_group_id
        attachment_circuit = sdp["attachment-circuits"]["attachment-circuit"][0]
        attachment_circuit["ac-ipv4-address"] = ep_data["IpAddress"]
        attachment_circuit["sdp-peering"]["peer-sap-id"] = ep_data["NextHopInfo"]

    # Configure Connection Group
    service["connection-groups"]["connection-group"][0]["id"] = connection_group_id

    return ietf_i
\ No newline at end of file
diff --git a/src/network_slice_controller.py b/src/network_slice_controller.py
deleted file mode 100644
index 6ac70885c872dda4d18e919a602abd4f1a15c870..0000000000000000000000000000000000000000
--- a/src/network_slice_controller.py
+++ /dev/null
@@ -1,1259 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-#     http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file includes original contributions from Telefonica Innovación Digital S.L.
-
-import json, time, os, logging, uuid, traceback, sys
-from datetime import datetime
-from src.helpers import tfs_connector, cisco_connector
-from src.Constants import DEFAULT_LOGGING_LEVEL, TFS_IP, TFS_L2VPN_SUPPORT, IXIA_IP, SRC_PATH, TEMPLATES_PATH, DUMMY_MODE, DUMP_TEMPLATES, PLANNER_ENABLED, NRP_ENABLED, UPLOAD_TYPE, NBI_L2_PATH, NBI_L3_PATH
-from src.realizers.ixia.NEII_V4 import NEII_controller
-from src.planner.planner import Planner
-
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
-    level=DEFAULT_LOGGING_LEVEL,
-    format='%(levelname)s - %(message)s')
-
-class NSController:
-    """
-    Network Slice Controller (NSC) - A class to manage network slice creation, 
-    modification, and deletion across different network domains.
-
-    This controller handles the translation, mapping, and realization of network 
-    slice intents from different formats (3GPP and IETF) to network-specific 
-    configurations.
-
-    Key Functionalities:
-    - Intent Processing: Translate and process network slice intents
-    - Slice Management: Create, modify, and delete network slices
-    - NRP (Network Resource Partition) Mapping: Match slice requirements with available resources
-    - Slice Realization: Convert intents to specific network configurations (L2VPN, L3VPN)
-    """
-
-    def __init__(self, controller_type = "TFS", tfs_ip=TFS_IP, ixia_ip =IXIA_IP, need_l2vpn_support=TFS_L2VPN_SUPPORT): 
-        """
-        Initialize the Network Slice Controller.
-
-        Args:
-            controller_type (str): Flag to determine if configurations 
-                should be uploaded to Teraflow or IXIA system.
-            need_l2vpn_support (bool, optional): Flag to determine if additional
-                L2VPN configuration support is required. Defaults to False.
-        
-        Attributes:
-            controller_type (str): Flag for Teraflow or Ixia upload
-            answer (dict): Stores slice creation responses
-            start_time (float): Tracks slice setup start time
-            end_time (float): Tracks slice setup end time
-            need_l2vpn_support (bool): Flag for additional L2VPN configuration support
-        """
-        self.controller_type = controller_type
-        self.tfs_ip = tfs_ip
-        self.path = ""
-        self.answer = {}
-        self.cool_answer = {}
-        self.start_time = 0
-        self.end_time = 0
-        self.setup_time = 0
-        self.need_l2vpn_support = need_l2vpn_support
-        # Internal templates and views
-        self.__gpp_template = ""
-        self.__ietf_template = ""
-        self.__teraflow_template = ""
-        self.__nrp_view = ""
-        self.subnet=""
-
-    # API Methods
-    def add_flow(self, intent):
-        """
-        Create a new transport network slice.
-
-        Args:
-            intent (dict): Network slice intent in 3GPP or IETF format
-
-        Returns:
-            Result of the Network Slice Controller (NSC) operation
-
-        API Endpoint:
-            POST /slice
-
-        Raises:
-            ValueError: If no transport network slices are found
-            Exception: For unexpected errors during slice creation process
-        """
-        return self.nsc(intent)
-
-    def get_flows(self,slice_id=None):
-        """
-        Retrieve transport network slice information.
-
-        This method allows retrieving:
-        - All transport network slices
-        - A specific slice by its ID
-
-        Args:
-            slice_id (str, optional): Unique identifier of a specific slice. 
-                                      Defaults to None.
-
-        Returns:
-            dict or list: 
-            - If slice_id is provided: Returns the specific slice details
-            - If slice_id is None: Returns a list of all slices
-            - Returns an error response if no slices are found
-
-        API Endpoint:
-            GET /slice/{id}
-
-        Raises:
-            ValueError: If no transport network slices are found
-            Exception: For unexpected errors during file processing
-        """
-        try:
-            # Read slice database from JSON file
-            with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'r') as file:
-                content = json.load(file)
-            # If specific slice ID is provided, find and return matching slice
-            if slice_id:
-                for slice in content:
-                    if slice["slice_id"] == slice_id:
-                        return slice
-            # If no slices exist, raise an error
-            if len(content) == 0:
-                raise ValueError("Transport network slices not found")
-            
-            # Return all slices if no specific ID is given
-            return [slice for slice in content if slice.get("controller") == self.controller_type]
-        
-        except ValueError as e:
-            # Handle case where no slices are found
-            return self.__send_response(False, code=404, message=str(e))
-        except Exception as e:
-            # Handle unexpected errors
-            return self.__send_response(False, code=500, message=str(e))
-
-    def modify_flow(self,slice_id, intent):
-        """
-        Modify an existing transport network slice.
-
-        Args:
-            slice_id (str): Unique identifier of the slice to modify
-            intent (dict): New intent configuration for the slice
-
-        Returns:
-            Result of the Network Slice Controller (NSC) operation
-
-        API Endpoint:
-            PUT /slice/{id}
-        """
-        return self.nsc(intent, slice_id)
-
-    def delete_flows(self, slice_id=None):
-        """
-        Delete transport network slice(s).
-
-        This method supports:
-        - Deleting a specific slice by ID
-        - Deleting all slices
-        - Optional cleanup of L2VPN configurations
-
-        Args:
-            slice_id (str, optional): Unique identifier of slice to delete. 
-                                      Defaults to None.
-
-        Returns:
-            dict: Response indicating successful deletion or error details
-
-        API Endpoint:
-            DELETE /slice/{id}
-
-        Raises:
-            ValueError: If no slices are found to delete
-            Exception: For unexpected errors during deletion process
-
-        Notes:
-            - If controller_type is TFS, attempts to delete from Teraflow
-            - If need_l2vpn_support is True, performs additional L2VPN cleanup
-        """
-        try:
-            # Read current slice database
-            with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'r') as file:
-                content = json.load(file)
-            id = None
-
-            # Delete specific slice if slice_id is provided
-            if slice_id:
-                for i, slice in enumerate(content):
-                    if slice["slice_id"] == slice_id and slice.get("controller") == self.controller_type:
-                        del content[i]
-                        id = i
-                        break
-                # Raise error if slice not found
-                if id is None:
-                    raise ValueError("Transport network slice not found")
-                # Update slice database
-                with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'w') as file:
-                    json.dump(content, file, indent=4)
-                logging.info(f"Slice {slice_id} removed successfully")
-                return self.__send_response(False, code=200, status="success", message=f"Transpor network slice {slice_id} deleted successfully")
-            
-            # Delete all slices
-            else:
-                # Optional: Delete in Teraflow if configured
-                if self.controller_type == "TFS":
-                    # TODO: should send a delete request to Teraflow
-                    if self.need_l2vpn_support:
-                        self.__tfs_l2vpn_delete()
-
-                data_removed = [slice for slice in content if slice.get("controller") == self.controller_type] 
-
-                # Verify slices exist before deletion
-                if len(data_removed) == 0:
-                    raise ValueError("Transport network slices not found")
-
-                filtered_data = [slice for slice in content if slice.get("controller") != self.controller_type]    
-                # Clear slice database
-                with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'w') as file:
-                    json.dump(filtered_data, file, indent=4)
-
-                logging.info("All slices removed successfully")
-                return self.__send_response(False, code=200, status="success", message="All transport network slices deleted successfully.")
-        
-        except ValueError as e:
-            return self.__send_response(False, code=404, message=str(e))
-        except Exception as e:
-            return self.__send_response(False, code=500, message=str(e))
-
-    # Main NSC Functionalities    
-    def nsc(self, intent_json, slice_id=None):
-        """
-        Main Network Slice Controller method to process and realize network slice intents.
-
-        Workflow:
-        1. Load IETF template
-        2. Process intent (detect format, translate if needed)
-        3. Extract slice data
-        4. Store slice information
-        5. Map slice to Network Resource Pool (NRP)
-        6. Realize slice configuration
-        7. Upload to Teraflow (optional)
-
-        Args:
-            intent_json (dict): Network slice intent in 3GPP or IETF format
-            slice_id (str, optional): Existing slice identifier for modification
-
-        Returns:
-            tuple: Response status and HTTP status code
-        
-        """
-        try:
-            # Start performance tracking
-            self.start_time = time.perf_counter()
-
-            # Reset requests and load IETF template
-            self.__load_template(1, os.path.join(TEMPLATES_PATH, "ietf_template_empty.json"))  
-            requests = {"services":[]}
-
-            # Store the received template for debugging
-            if DUMP_TEMPLATES:
-                with open(os.path.join(TEMPLATES_PATH, "nbi_template.json"), "w") as file:
-                    file.write(json.dumps(intent_json,indent=2))
-            
-            # Process intent (translate if 3GPP)
-            ietf_intents = self.__nbi_processor(intent_json)
-
-            # Store the generated template for debugging
-            if DUMP_TEMPLATES:
-                with open(os.path.join(TEMPLATES_PATH, "ietf_template.json"), "w") as file:
-                    file.write(json.dumps(ietf_intents,indent=2))
-
-            if ietf_intents:
-                for intent in ietf_intents:
-                     # Extract and store slice request details
-                    self.__extract_data(intent)
-                    self.__store_data(intent, slice_id)       
-                    # Mapper
-                    self.__mapper(intent)
-                    # Realizer
-                    tfs_request = self.__realizer(intent)
-                    requests["services"].append(tfs_request)
-            else:
-                return self.__send_response(False, code=404, message="No intents found")
-            
-            # Store the generated template for debugging
-            if DUMP_TEMPLATES:
-                with open(os.path.join(TEMPLATES_PATH, "realizer_template.json"), "w") as archivo:
-                    archivo.write(json.dumps(requests,indent=2))
-            
-            # Optional: Upload template to Teraflow
-            if not DUMMY_MODE:
-                if self.controller_type == "TFS":
-                    if UPLOAD_TYPE == "WEBUI":
-                        response = tfs_connector().webui_post(self.tfs_ip, requests)
-                    elif UPLOAD_TYPE == "NBI":
-                        for intent in requests["services"]:
-                            # Send each separate NBI request
-                            response = tfs_connector().nbi_post(self.tfs_ip, intent, self.path)
-
-                            if not response.ok:
-                                return self.__send_response(False, code=response.status_code, message=f"Teraflow upload failed. Response: {response.text}")
-                    
-                    # For deploying an L2VPN with path selection (not supported by Teraflow)
-                    if self.need_l2vpn_support:
-                        self.__tfs_l2vpn_support(requests["services"])
-
-                    logging.info("Request sent to Teraflow")
-                elif self.controller_type == "IXIA":
-                    neii_controller = NEII_controller()
-                    for intent in requests["services"]:
-                        # Send each separate IXIA request
-                        neii_controller.nscNEII(intent)
-                    logging.info("Requests sent to Ixia")
-
-            # End performance tracking
-            self.end_time = time.perf_counter()
-            return self.__send_response(True, code=200)
-
-        except ValueError as e:
-            return self.__send_response(False, code=400, message=str(e))
-        except Exception as e:
-            return self.__send_response(False, code=500, message=str(e))
-        
-    def __nbi_processor(self, intent_json):
-        """
-        Process and translate network slice intents from different formats (3GPP or IETF).
-
-        This method detects the input JSON format and converts 3GPP intents to IETF format.
-        Supports multiple slice subnets in 3GPP format.
-
-        Args:
-            intent_json (dict): Input network slice intent in either 3GPP or IETF format.
-
-        Returns:
-            list: A list of IETF-formatted network slice intents.
-
-        Raises:
-            ValueError: If the JSON request format is not recognized.
-        """
-        # Detect the input JSON format (3GPP or IETF)
-        format = self.__detect_format(intent_json)
-        ietf_intents = []
-
-        # TODO Needs to be generalized to support different names of slicesubnets
-        # Process different input formats
-        if format == "3GPP":
-            # Translate each subnet in 3GPP format to IETF format
-            for subnet in intent_json["RANSliceSubnet1"]["networkSliceSubnetRef"]:
-                ietf_intents.append(self.__translator(intent_json, subnet))
-            logging.info(f"3GPP requests translated to IETF template")
-        elif format == "IETF":
-            # If already in IETF format, add directly
-            logging.info(f"IETF intent received")
-            ietf_intents.append(intent_json)
-        else:
-            # Handle unrecognized format
-            logging.error(f"JSON request format not recognized")
-            raise ValueError("JSON request format not recognized")
-        
-        return ietf_intents
-
-    def __mapper(self, ietf_intent):
-        """
-        Map an IETF network slice intent to the most suitable Network Resource Partition (NRP).
-
-        This method:
-        1. Retrieves the current NRP view
-        2. Extracts Service Level Objectives (SLOs) from the intent
-        3. Finds NRPs that can meet the SLO requirements
-        4. Selects the best NRP based on viability and availability
-        5. Attaches the slice to the selected NRP or creates a new one
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Raises:
-            Exception: If no suitable NRP is found and slice creation fails.
-        """ 
-        if NRP_ENABLED:
-            # Retrieve NRP view
-            self.__realizer(None, True, "READ")
-
-            # Extract Service Level Objectives (SLOs) from the intent
-            slos = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
-
-            if slos:
-                # Find candidate NRPs that can meet the SLO requirements
-                candidates = [
-                    (nrp, self.__slo_viability(slos, nrp)[1]) 
-                    for nrp in self.__nrp_view 
-                    if self.__slo_viability(slos, nrp)[0] and nrp["available"]
-                ]
-                logging.debug(f"Candidates: {candidates}")
-
-                # Select the best NRP based on candidates
-                best_nrp = max(candidates, key=lambda x: x[1])[0] if candidates else None
-                logging.debug(f"Best NRP: {best_nrp}")
-
-                if best_nrp:
-                    best_nrp["slices"].append(ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"])
-                    # Update NRP view
-                    self.__realizer(ietf_intent, True, "UPDATE")
-                    # TODO Here we should put how the slice is attached to an already created nrp
-                else: 
-                    # Request the controller to create a new NRP that meets the SLOs
-                    answer = self.__realizer(ietf_intent, True, "CREATE", best_nrp)
-                    if not answer:
-                        raise Exception("Slice rejected due to lack of NRPs") 
-                    # TODO Here we should put how the slice is attached to the new nrp
-        
-        if PLANNER_ENABLED:
-            optimal_path = Planner().planner(ietf_intent)
-
-            logging.info(f"Optimal path: {optimal_path}")
-
-    def __realizer(self, ietf_intent, need_nrp=False, order=None, nrp=None):
-        """
-        Manage the slice creation workflow.
-
-        This method handles two primary scenarios:
-        1. Interact with network controllers for NRP (Network Resource Partition) operations when need_nrp is True
-        2. Slice service selection when need_nrp is False
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-            need_nrp (bool, optional): Flag to indicate if NRP operations are needed. Defaults to False.
-            order (str, optional): Type of NRP operation (READ, UPDATE, CREATE). Defaults to None.
-            nrp (dict, optional): Specific Network Resource Partition to operate on. Defaults to None.
-        """
-        if need_nrp:
-            # Perform NRP-related operations
-            self.__nrp(order, nrp)
-        else:
-            # Select slice service method
-            way = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"]["tag-type"]["value"]
-            way = "L3VPN"
-            return self.__select_way(controller=self.controller_type, way=way, ietf_intent=ietf_intent)
-
-    ### Generic functionalities
-    def __load_template(self, which, dir_t):
-        """
-        Load and process JSON templates for different network slice formats.
-
-        Args:
-            which (int): Template selector (0: 3GPP, 1: IETF, other: Teraflow)
-            dir_t (str): Directory path to the template file
-        """
-        try:
-            # Open and read the template file
-            with open(dir_t, 'r') as source:
-                # Clean up the JSON template
-                template = source.read().replace('\t', '').replace('\n', '').replace("'", '"').strip()
-                
-                # Store template based on selector
-                if which == 0:
-                    self.__gpp_template = template
-                elif which == 1:
-                    self.__ietf_template = template
-                else:
-                    self.__teraflow_template = template
-                
-        except Exception as e:
-            logging.error(f"Template loading error: {e}")
-            return self.__send_response(False, code=500, message=f"Template loading error: {e}")
-
-    def __send_response(self, result, status="error", message=None, code=None):
-        """
-        Generate and send a response to the 3GPP client about the slice request.
-
-        Args:
-            result (bool): Indicates whether the slice request was successful
-            status (str, optional): Response status. Defaults to "error"
-            message (str, optional): Additional error message. Defaults to None
-            code (str, optional): Response code. Defaults to None
-
-        Returns:
-            tuple: A tuple containing the response dictionary and status code
-        """    
-        if result:
-            # Successful slice creation
-            logging.info("Your slice request was fulfilled sucessfully")
-            self.setup_time = (self.end_time - self.start_time)*1000
-            logging.info(f"Setup time: {self.setup_time:.2f}")
-
-            # Construct detailed successful response
-            answer = {
-                "status": "success",
-                "code": code,
-                "slices": [],
-                "setup_time": self.setup_time
-            }
-            # Add slice details to the response
-            for subnet in self.answer:
-                slice_info = {
-                    "id": subnet,
-                    "source": self.answer[subnet]["Source"],
-                    "destination": self.answer[subnet]["Destination"],
-                    "vlan": self.answer[subnet]["VLAN"],
-                    "requirements": self.answer[subnet]["QoS Requirements"],
-                }
-                answer["slices"].append(slice_info)
-            self.cool_answer = answer
-        else:
-            # Failed slice creation
-            logging.info("Your request cannot be fulfilled. Reason: "+message)
-            self.cool_answer = {
-                "status" :status,
-                "code": code,
-                "message": message
-            }
-        return self.cool_answer, code
-
-    def __extract_data(self, intent_json):
-        """
-        Extract source and destination IP addresses from the IETF intent.
-
-        Args:
-            intent_json (dict): IETF-formatted network slice intent
-        """
-        # Extract source and destination IP addresses
-        source = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"]
-        destination = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"]
-
-        logging.info(f"Intent generated between {source} and {destination}") 
-
-        # Store slice and connection details
-        self.subnet = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-        self.subnet = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-        self.answer[self.subnet] = {
-            "Source": source,
-            "Destination": destination
-        }
-    
-    def __store_data(self, intent, slice_id):
-        """
-        Store network slice intent information in a JSON database file.
-
-        This method:
-        1. Creates a JSON file if it doesn't exist
-        2. Reads existing content
-        3. Updates or adds new slice intent information
-
-        Args:
-            intent (dict): Network slice intent to be stored
-            slice_id (str, optional): Existing slice ID to update. Defaults to None.
-        """
-        file_path = os.path.join(SRC_PATH, "slice_ddbb.json")
-        # Create initial JSON file if it doesn't exist
-        if not os.path.exists(file_path):
-            with open(file_path, 'w') as file:
-                json.dump([], file, indent=4)
-
-        # Read existing content
-        with open(file_path, 'r') as file:
-            content = json.load(file)
-    
-        # Update or add new slice intent
-        if slice_id:
-            # Update existing slice intent
-            for slice in content:
-                if slice["slice_id"] == slice_id:
-                    slice["intent"] = intent
-        else:
-            # Add new slice intent
-            content.append(
-                {
-                    "slice_id": intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"],
-                    "intent": intent,
-                    "controller": self.controller_type,
-                })
-        
-        # # Write updated content back to file
-        with open(file_path, 'w') as file:
-            json.dump(content, file, indent=4)
-
-    ### NBI processor functionalities
-    def __detect_format(self,json_data):    
-        """
-        Detect the format of the input network slice intent.
-
-        This method identifies whether the input JSON is in 3GPP or IETF format 
-        by checking for specific keys in the JSON structure.
-
-        Args:
-            json_data (dict): Input network slice intent JSON
-
-        Returns:
-            str or None: 
-                - "IETF" if IETF-specific keys are found
-                - "3GPP" if 3GPP-specific keys are found
-                - None if no recognizable format is detected
-        """
-        # Check for IETF-specific key
-        if "ietf-network-slice-service:network-slice-services" in json_data:
-            return "IETF"
-        # Check for 3GPP-specific keys
-        if any(key in json_data for key in ["NetworkSlice1", "TopSliceSubnet1", "CNSliceSubnet1", "RANSliceSubnet1"]):
-            return "3GPP"
-        
-        return None
-    
-    def __translator(self, gpp_intent, subnet):
-        """
-        Translate a 3GPP network slice intent to IETF format.
-
-        This method converts a 3GPP intent into a standardized IETF intent template, 
-        mapping key parameters such as QoS profiles, service endpoints, and connection details.
-
-        Args:
-            gpp_intent (dict): Original 3GPP network slice intent
-            subnet (str): Specific subnet reference within the 3GPP intent
-
-        Returns:
-            dict: Translated IETF-formatted network slice intent
-        
-        Notes:
-            - Generates a unique slice service ID using UUID
-            - Maps QoS requirements, source/destination endpoints
-            - Logs the translated intent to a JSON file for reference
-        """
-        # Load IETF template and create a copy to modify
-        ietf_i = json.loads(str(self.__ietf_template))
-
-        # Extract endpoint transport objects
-        ep_transport_objects = gpp_intent[subnet]["EpTransport"]
-
-        # Populate template with SLOs (currently supporting QoS profile, latency and bandwidth)
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = gpp_intent[ep_transport_objects[0]]["qosProfile"]
-        
-        profile = gpp_intent.get(subnet, {}).get("SliceProfileList", [{}])[0].get("RANSliceSubnetProfile", {})
-
-    
-        metrics = {
-            ("uLThptPerSliceSubnet", "MaxThpt"): ("one-way-bandwidth", "kbps"),
-            ("uLLatency",): ("one-way-delay-maximum", "milliseconds"),
-            ("EnergyConsumption",): ("energy_consumption", "Joules"),
-            ("EnergyEfficiency",): ("energy_efficiency", "W/bps"),
-            ("CarbonEmissions",): ("carbon_emission", "gCO2eq"),
-            ("RenewableEnergyUsage",): ("renewable_energy_usage", "rate")
-        }
-
-        # Aux
-        def get_nested(d, keys):
-            for k in keys:
-                if isinstance(d, dict) and k in d:
-                    d = d[k]
-                else:
-                    return None
-            return d
-
-        for key_path, (metric_type, metric_unit) in metrics.items():
-            value = get_nested(profile, key_path)
-            if value is not None:
-                ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]\
-                    ["slo-sle-template"][0]["slo-policy"]["metric-bound"].append({
-                    "metric-type": metric_type,
-                    "metric-unit": metric_unit,
-                    "bound": value
-                })
-
-
-        # Generate unique slice service ID and description
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] = f"slice-service-{uuid.uuid4()}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = f"Transport network slice mapped with 3GPP slice {next(iter(gpp_intent))}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["slo-sle-policy"]["slo-sle-template"] = ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
-        
-        # Configure Source SDP
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["node-id"] = ep_transport_objects[0].split(" ", 1)[1]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[0]]["EpApplicationRef"][0]]["localAddress"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceType"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceId"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[0]]["IpAddress"] 
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[0]]["NextHopInfo"] 
-
-        # Configure Destination SDP
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["node-id"] = ep_transport_objects[1].split(" ", 1)[1]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[1]]["EpApplicationRef"][0]]["localAddress"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceType"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceId"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[1]]["IpAddress"] 
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[1]]["NextHopInfo"] 
-
-        # Configure Connection Group and match-criteria
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["connection-groups"]["connection-group"][0]["id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
-
-        return ietf_i
-    
-    ### Mapper functionalities
-    def __slo_viability(self, slice_slos, nrp_slos):
-        """
-        Compare Service Level Objectives (SLOs) between a slice and a Network Resource Partition (NRP).
-
-        This method assesses whether an NRP can satisfy the SLOs of a network slice.
-
-        Args:
-            slice_slos (list): Service Level Objectives of the slice
-            nrp_slos (dict): Service Level Objectives of the Network Resource Pool
-
-        Returns:
-            tuple: A boolean indicating viability and a flexibility score
-                - First value: True if NRP meets SLOs, False otherwise
-                - Second value: A score representing how well the NRP meets the SLOs
-        """
-        # Define SLO types for maximum and minimum constraints
-        slo_type = {
-            "max": ["one-way-delay-maximum", "two-way-delay-maximum", "one-way-delay-percentile", "two-way-delay-percentile",
-                    "one-way-delay-variation-maximum", "two-way-delay-variation-maximum",
-                    "one-way-delay-variation-percentile", "two-way-delay-variation-percentile",
-                    "one-way-packet-loss", "two-way-packet-loss"],
-            "min": ["one-way-bandwidth", "two-way-bandwidth", "shared-bandwidth"]
-        }
-        flexibility_scores = []
-        for slo in slice_slos:
-            for nrp_slo in nrp_slos['slos']:
-                if slo["metric-type"] == nrp_slo["metric-type"]:
-                    # Handle maximum type SLOs
-                    if slo["metric-type"] in slo_type["max"]:
-                        flexibility = (nrp_slo["bound"] - slo["bound"]) / slo["bound"]
-                        if slo["bound"] > nrp_slo["bound"]:
-                            return False, 0  # Does not meet maximum constraint
-                    # Handle minimum type SLOs
-                    if slo["metric-type"] in slo_type["min"]:
-                        flexibility = (slo["bound"] - nrp_slo["bound"]) / slo["bound"]
-                        if slo["bound"] < nrp_slo["bound"]:
-                            return False, 0  # Does not meet minimum constraint
-                    flexibility_scores.append(flexibility)
-                    break  # Exit inner loop after finding matching metric
-            
-            # Calculate final viability score
-            score = sum(flexibility_scores) / len(flexibility_scores) if flexibility_scores else 0
-        return True, score  # Si pasó todas las verificaciones, la NRP es viable
-    
-    ### Realizer functionalities.
-    def __nrp(self, request, nrp):
-        """
-        Manage Network Resource Partition (NRP) operations.
-
-        This method handles CRUD operations for Network Resource Partitions,
-        interacting with Network Controllers (currently done statically via a JSON-based database file).
-
-        Args:
-            request (str): The type of operation to perform. 
-                Supported values:
-                - "CREATE": Add a new NRP to the database
-                - "READ": Retrieve the current NRP view
-                - "UPDATE": Update an existing NRP (currently a placeholder)
-
-            nrp (dict): The Network Resource Partition details to create or update.
-
-        Returns:
-            None or answer: 
-            - For "CREATE": Returns the response from the controller (currently using a static JSON)
-            - For "READ": Gets the NRP view from the controller (currently using a static JSON)
-            - For "UPDATE": Placeholder for update functionality
-
-        Notes:
-            - Uses a local JSON file "nrp_ddbb.json" to store NRP information as controller operation is not yet defined
-        """
-        if request == "CREATE":
-            # TODO: Implement actual request to Controller to create an NRP
-            logging.debug("Creating NRP")
-
-            # Load existing NRP database
-            with open(os.path.join(SRC_PATH, "nrp_ddbb.json"), "r") as archivo:
-                self.__nrp_view = json.load(archivo)
-
-            # Append new NRP to the view
-            self.__nrp_view.append(nrp)
-
-            # Placeholder for controller POST request
-            answer = None
-            return answer
-        elif request == "READ":
-            # TODO: Request to Controller to get topology and current NRP view
-            logging.debug("Reading Topology")
-
-            # Load NRP database
-            with open(os.path.join(SRC_PATH, "nrp_ddbb.json"), "r") as archivo:
-                self.__nrp_view = json.load(archivo)
-            
-        elif request == "UPDATE":
-            # TODO: Implement request to Controller to update NRP
-            logging.debug("Updating NRP")
-            answer = ""
-    
-    def __select_way(self, controller=None, way=None, ietf_intent=None):
-        """
-        Determine the method of slice realization.
-
-        Args:
-            controller (str): The controller to use for slice realization.
-                Supported values:
-                - "IXIA": IXIA NEII for network testing
-                - "TFS": TeraFlow Service for network slice management
-            way (str): The type of technology to use.
-                Supported values:
-                - "L2VPN": Layer 2 Virtual Private Network
-                - "L3VPN": Layer 3 Virtual Private Network
-
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: A realization request for the specified network slice type.
-
-        """
-        realizing_request = None
-        if controller == "TFS":
-            if way == "L2VPN":
-                realizing_request = self.__tfs_l2vpn(ietf_intent)
-            elif way == "L3VPN":
-                realizing_request = self.__tfs_l3vpn(ietf_intent)
-            else:
-                logging.warning(f"Unsupported way: {way}. Defaulting to L2VPN realization.")
-                realizing_request = self.__tfs_l2vpn(ietf_intent)
-        elif controller == "IXIA":
-            realizing_request = self.__ixia(ietf_intent)
-        else:
-            logging.warning(f"Unsupported controller: {controller}. Defaulting to TFS L2VPN realization.")
-            realizing_request = self.__tfs_l2vpn(ietf_intent)
-        return realizing_request
-
-    def __tfs_l2vpn(self, ietf_intent):
-        """
-        Translate slice intent into a TeraFlow service request.
-
-        This method prepares a L2VPN service request by:
-        1. Defining endpoint routers
-        2. Loading a service template
-        3. Generating a unique service UUID
-        4. Configuring service endpoints
-        5. Adding QoS constraints
-        6. Preparing configuration rules for network interfaces
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: A TeraFlow service request for L2VPN configuration.
-
-        """
-        # Hardcoded router endpoints
-        # TODO (should be dynamically determined)
-        origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-        destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-
-        # Extract QoS Profile from intent
-        QoSProfile = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
-        vlan_value = 0
-
-        self.answer[self.subnet]["QoS Requirements"] = []
-
-        # Populate response with QoS requirements and VLAN from intent
-        slo_policy = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]
-
-        # Process metrics
-        for metric in slo_policy.get("metric-bound", []):
-            constraint_type = f"{metric['metric-type']}[{metric['metric-unit']}]"
-            constraint_value = str(metric["bound"])
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": constraint_type,
-                "constraint_value": constraint_value
-            })
-
-        # Availability
-        if "availability" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "availability[%]",
-                "constraint_value": str(slo_policy["availability"])
-            })
-
-        # MTU
-        if "mtu" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "mtu[bytes]",
-                "constraint_value": str(slo_policy["mtu"])
-            })
-
-        # VLAN
-        vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
-        self.answer[self.subnet]["VLAN"] = vlan_value
-
-        if UPLOAD_TYPE == "WEBUI":
-            # Load L2VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "L2-VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))["services"][0]
-
-            # Generate unique service UUID
-            tfs_request["service_id"]["service_uuid"]["uuid"] += "-" + str(int(datetime.now().timestamp() * 1e7))
-
-            # Configure service endpoints
-            for endpoint in tfs_request["service_endpoint_ids"]:
-                endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
-                endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
-
-            # Add service constraints
-            for constraint in self.answer[self.subnet]["QoS Requirements"]:
-                tfs_request["service_constraints"].append({"custom": constraint})
-
-            # Add configuration rules
-            for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
-                router_id = origin_router_id if i == 1 else destination_router_id
-                router_if = origin_router_if if i == 1 else destination_router_if
-                resource_value = config_rule["custom"]["resource_value"]
-
-                sdp_index = i - 1
-                vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
-                if vlan_value:
-                    resource_value["vlan_id"] = int(vlan_value)
-                resource_value["circuit_id"] = vlan_value
-                resource_value["remote_router"] = destination_router_id if i == 1 else origin_router_id
-                resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
-                config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
-
-        elif UPLOAD_TYPE == "NBI":
-            self.path = NBI_L2_PATH
-            # Load IETF L2VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "ietfL2VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))
-
-            # Generate service UUID
-            full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-            uuid_only = full_id.split("slice-service-")[-1]
-            tfs_request["ietf-l2vpn-svc:vpn-service"][0]["vpn-id"] = uuid_only
-
-            # Configure service endpoints
-            sites = tfs_request["ietf-l2vpn-svc:vpn-service"][0]["site"]
-            sdps = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"]
-
-            for i, site in enumerate(sites):
-                is_origin = (i == 0)
-                router_id = origin_router_id if is_origin else destination_router_id
-                sdp = sdps[0] if is_origin else sdps[1]
-                site["site-id"] = router_id
-                site["site-location"] = sdp["node-id"]
-                site["site-network-access"]["interface"]["ip-address"] = sdp["sdp-ip-address"]
-
-        logging.info(f"L2VPN Intent realized\n")
-        return tfs_request
-    
-    def __tfs_l2vpn_support(self, requests):
-        """
-        Configuration support for L2VPN with path selection based on MPLS traffic-engineering tunnels
-
-        Args:
-            requests (list): A list of configuration parameters.
-
-        """
-        sources={
-            "source": "10.60.125.44",
-            "config":[]
-        }
-        destinations={
-            "destination": "10.60.125.45",
-            "config":[]
-        }
-        for request in requests:
-            # Configure Source Endpoint
-            temp_source = request["service_config"]["config_rules"][1]["custom"]["resource_value"]
-            endpoints = request["service_endpoint_ids"]
-            config = {
-                "ni_name": temp_source["ni_name"],
-                "remote_router": temp_source["remote_router"],
-                "interface": endpoints[0]["endpoint_uuid"]["uuid"].replace("0/0/0-", ""),
-                "vlan" : temp_source["vlan_id"],
-                "number" : temp_source["vlan_id"] % 10 + 1
-            }
-            sources["config"].append(config)
-
-            # Configure Destination Endpoint
-            temp_destiny = request["service_config"]["config_rules"][2]["custom"]["resource_value"]
-            config = {
-                "ni_name": temp_destiny["ni_name"],
-                "remote_router": temp_destiny["remote_router"],
-                "interface": endpoints[1]["endpoint_uuid"]["uuid"].replace("0/0/3-", ""),
-                "vlan" : temp_destiny["vlan_id"],
-                "number" : temp_destiny["vlan_id"] % 10 + 1
-            }
-            destinations["config"].append(config)
-         
-        #cisco_source = cisco_connector(source_address, ni_name, remote_router, vlan, vlan % 10 + 1)
-        cisco_source = cisco_connector(sources["source"], sources["config"])
-        commands = cisco_source.full_create_command_template()
-        cisco_source.execute_commands(commands)
-
-        #cisco_destiny = cisco_connector(destination_address, ni_name, remote_router, vlan, vlan % 10 + 1)
-        cisco_destiny = cisco_connector(destinations["destination"], destinations["config"])
-        commands = cisco_destiny.full_create_command_template()
-        cisco_destiny.execute_commands(commands)
-
-    def __tfs_l2vpn_delete(self):
-        """
-        Delete L2VPN configurations from Cisco devices.
-
-        This method removes L2VPN configurations from Cisco routers
-
-        Notes:
-            - Uses cisco_connector to generate and execute deletion commands
-            - Clears Network Interface (NI) settings
-        """
-        # Delete Source Endpoint Configuration
-        source_address = "10.60.125.44"
-        cisco_source = cisco_connector(source_address)
-        cisco_source.execute_commands(cisco_source.create_command_template_delete())
-
-        # Delete Destination Endpoint Configuration
-        destination_address = "10.60.125.45"
-        cisco_destiny = cisco_connector(destination_address)
-        cisco_destiny.execute_commands(cisco_destiny.create_command_template_delete())
-    
-    def __tfs_l3vpn(self, ietf_intent):
-        """
-        Translate L3VPN (Layer 3 Virtual Private Network) intent into a TeraFlow service request.
-
-        Similar to __tfs_l2vpn, but configured for Layer 3 VPN:
-        1. Defines endpoint routers
-        2. Loads service template
-        3. Generates unique service UUID
-        4. Configures service endpoints
-        5. Adds QoS constraints
-        6. Prepares configuration rules for network interfaces
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: A TeraFlow service request for L3VPN configuration.
-        """
-        # Hardcoded router endpoints
-        # TODO (should be dynamically determined)
-        origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-        destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-
-        # Extract QoS Profile from intent
-        QoSProfile = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
-        vlan_value = 0
-
-        self.answer[self.subnet]["QoS Requirements"] = []
-
-        # Populate response with QoS requirements and VLAN from intent
-        slo_policy = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]
-
-        # Process metrics
-        for metric in slo_policy.get("metric-bound", []):
-            constraint_type = f"{metric['metric-type']}[{metric['metric-unit']}]"
-            constraint_value = str(metric["bound"])
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": constraint_type,
-                "constraint_value": constraint_value
-            })
-
-        # Availability
-        if "availability" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "availability[%]",
-                "constraint_value": str(slo_policy["availability"])
-            })
-
-        # MTU
-        if "mtu" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "mtu[bytes]",
-                "constraint_value": str(slo_policy["mtu"])
-            })
-
-        # VLAN
-        vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
-        self.answer[self.subnet]["VLAN"] = vlan_value
-
-        if UPLOAD_TYPE == "WEBUI":
-            # Load L3VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "L3-VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))["services"][0]
-            
-            # Generate unique service UUID
-            tfs_request["service_id"]["service_uuid"]["uuid"] += "-" + str(int(datetime.now().timestamp() * 1e7))
-
-            # Configure service endpoints
-            for endpoint in tfs_request["service_endpoint_ids"]:
-                endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
-                endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
-
-            # Add service constraints
-            for constraint in self.answer[self.subnet]["QoS Requirements"]:
-                tfs_request["service_constraints"].append({"custom": constraint})
-
-            # Add configuration rules
-            for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
-                router_id = origin_router_id if i == 1 else destination_router_id
-                router_if = origin_router_if if i == 1 else destination_router_if
-                resource_value = config_rule["custom"]["resource_value"]
-
-                sdp_index = i - 1
-                vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
-                resource_value["router_id"] = destination_router_id if i == 1 else origin_router_id
-                resource_value["vlan_id"] = int(vlan_value)
-                resource_value["address_ip"] = destination_router_id if i == 1 else origin_router_id
-                resource_value["policy_AZ"] = "policyA"
-                resource_value["policy_ZA"] = "policyB"
-                resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
-                config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
-        
-        elif UPLOAD_TYPE == "NBI":
-            self.path = NBI_L3_PATH
-            # Load IETF L3VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "ietfL3VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))
-
-            # Generate service UUID
-            full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-            tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["vpn-services"]["vpn-service"][0]["vpn-id"] = full_id
-            # Configure service endpoints
-            for i, site in enumerate(tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["sites"]["site"]):
-
-                # Determine if origin or destination
-                is_origin = (i == 0)
-                sdp_index = 0 if is_origin else 1
-                location = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["node-id"]
-                router_id = origin_router_id if is_origin else destination_router_id
-                router_if = origin_router_if if is_origin else destination_router_if
-
-                # Assign common values
-                site["site-id"] = f"site_{location}"
-                site["locations"]["location"][0]["location-id"] = location
-                site["devices"]["device"][0]["device-id"] = router_id
-                site["devices"]["device"][0]["location"] = location
-
-                access = site["site-network-accesses"]["site-network-access"][0]
-                access["site-network-access-id"] = router_if
-                access["device-reference"] = router_id
-                access["vpn-attachment"]["vpn-id"] = full_id
-
-                # Aplicar restricciones QoS
-                for constraint in self.answer[self.subnet]["QoS Requirements"]:
-                    ctype = constraint["constraint_type"]
-                    cvalue = float(constraint["constraint_value"])
-                    if constraint["constraint_type"].startswith("one-way-bandwidth"):
-                            unit = constraint["constraint_type"].split("[")[-1].rstrip("]")
-                            multiplier = {"bps": 1, "kbps": 1_000, "Mbps": 1_000_000, "Gbps": 1_000_000_000}.get(unit, 1)
-                            value = int(cvalue * multiplier)
-                            access["service"]["svc-input-bandwidth"] = value
-                            access["service"]["svc-output-bandwidth"] = value
-                    elif ctype == "one-way-delay-maximum[milliseconds]":
-                        access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["latency"]["latency-boundary"] = int(cvalue)
-                    elif ctype == "availability[%]":
-                        access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["bandwidth"]["guaranteed-bw-percent"] = int(cvalue)
-                    elif ctype == "mtu[bytes]":
-                        access["service"]["svc-mtu"] = int(cvalue)
-
-        
-        logging.info(f"L3VPN Intent realized\n")
-        self.answer[self.subnet]["VLAN"] = vlan_value
-        return tfs_request
-
-    def __ixia(self, ietf_intent):
-        """
-        Prepare an Ixia service request based on the IETF intent.
-
-        This method configures an Ixia service request by:
-        1. Defining endpoint routers
-        2. Loading a service template
-        3. Generating a unique service UUID
-        4. Configuring service endpoints
-        5. Adding QoS constraints
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: An Ixia service request for configuration.
-        """
-        self.answer[self.subnet]["QoS Requirements"] = []
-                # Add service constraints
-        for i, constraint in enumerate(ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]):
-            bound = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["bound"]
-            metric_type = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["metric-type"]
-            metric_unit = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["metric-unit"]
-            service_constraint ={
-                "custom": {
-                    "constraint_type": f"{metric_type}[{metric_unit}]",
-                    "constraint_value": f"{bound}"
-                }
-            }
-            self.answer[self.subnet]["QoS Requirements"].append(service_constraint["custom"])
-        self.answer[self.subnet]["VLAN"] = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
-        # Extraer la lista de métricas de forma segura
-        metric_bounds = ietf_intent.get("ietf-network-slice-service:network-slice-services", {}) \
-            .get("slo-sle-templates", {}) \
-            .get("slo-sle-template", [{}])[0] \
-            .get("slo-policy", {}) \
-            .get("metric-bound", [])
-
-        # Inicializar valores
-        bandwidth = None
-        latency = None
-        tolerance = None
-
-        # Asignar valores según el tipo de métrica
-        for metric in metric_bounds:
-            metric_type = metric.get("metric-type")
-            bound = metric.get("bound")
-
-            if metric_type == "one-way-bandwidth":
-                bandwidth = bound
-            elif metric_type == "one-way-delay-maximum":
-                latency = bound
-            elif metric_type == "one-way-delay-variation-maximum": 
-                tolerance = bound
-
-        # Construcción del diccionario intent
-        intent = {
-            "src_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slice-service", [{}])[0]
-                .get("sdps", {}).get("sdp", [{}])[0]
-                .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
-                .get("sdp-peering", {}).get("peer-sap-id"),
-
-            "dst_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slice-service", [{}])[0]
-                .get("sdps", {}).get("sdp", [{}, {}])[1]
-                .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
-                .get("sdp-peering", {}).get("peer-sap-id"),
-
-            "vlan_id": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slice-service", [{}])[0]
-                .get("sdps", {}).get("sdp", [{}])[0]
-                .get("service-match-criteria", {}).get("match-criterion", [{}])[0]
-                .get("value"),
-
-            "bandwidth": bandwidth,
-            "latency": latency,
-            "tolerance": tolerance,
-
-            "latency_version": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
-                .get("description"),
-
-            "reliability": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
-                .get("sle-policy", {}).get("reliability"),
-        }
-
-        logging.info(f"IXIA Intent realized\n")
-        return intent
-    
diff --git a/src/planner/energy_planner/energy.py b/src/planner/energy_planner/energy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6d33c835e51044d4f3ec29159a3349a9e549fc0
--- /dev/null
+++ b/src/planner/energy_planner/energy.py
@@ -0,0 +1,393 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, random, os, json, heapq  
+from src.config.constants import SRC_PATH
+from flask import current_app
+from src.utils.safe_get import safe_get
+
+
+def energy_planner(intent):
+    """
+    Plan an optimal network path based on energy consumption metrics.
+
+    This function calculates the most energy-efficient path between source
+    and destination nodes, considering energy consumption, carbon emissions,
+    energy efficiency, and renewable energy usage constraints.
+
+    Args:
+        intent (dict): Network slice intent containing service delivery points
+                      and energy-related SLO constraints
+
+    Returns:
+        list or None: Ordered list of node names representing the optimal path,
+                     or None if no valid path is found or topology is not recognized
+
+    Notes:
+        - Only supports topology with nodes A through G
+        - Can use external PCE or internal Dijkstra-based algorithm
+        - Considers DLOS constraint bounds for energy metrics:
+          EC (Energy Consumption), CE (Carbon Emission),
+          EE (Energy Efficiency), URE (Renewable Energy Usage)
+
+    Raises:
+        Exception: For errors in energy metrics or topology retrieval
+    """    
+    energy_metrics = retrieve_energy()
+    topology = retrieve_topology()
+    source = safe_get(intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 0, "node-id"])
+    destination = safe_get(intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 1, "node-id"])
+    optimal_path = []
+    allowed_ids = {"A", "B", "C", "D", "E", "F", "G"}
+
+    if source not in allowed_ids or destination not in allowed_ids:
+        logging.warning(f"Topology not recognized (source: {source}, destination: {destination}). Skipping energy-based planning.")
+        return None
+    
+    # If using an external PCE
+    if current_app.config["PCE_EXTERNAL"]:
+        logging.debug("Using external PCE for path planning")    
+        def build_slice_input(node_source, node_destination):
+            """Build input format for external PCE slice computation."""
+            return {
+                "clientName": "demo-client",
+                "requestId": random.randint(1000, 9999),
+                "sites": [node_source["nodeId"], node_destination["nodeId"]],
+                "graph": {
+                    "nodes": [
+                        {
+                            "nodeId": node_source["nodeId"],
+                            "name": node_source["name"],
+                            "footprint": node_source["footprint"],
+                            "sticky": [node_source["nodeId"]]
+                        },
+                        {
+                            "nodeId": node_destination["nodeId"],
+                            "name": node_destination["name"],
+                            "footprint": node_destination["footprint"],
+                            "sticky": [node_destination["nodeId"]]
+                        }
+                    ],
+                    "links": [
+                        {
+                            "fromNodeId": node_source["nodeId"],
+                            "toNodeId": node_destination["nodeId"],
+                            "bandwidth": 1000000000,
+                            "metrics": [
+                                {
+                                    "metric": "DELAY",
+                                    "value": 10,
+                                    "bound": True,
+                                    "required": True
+                                }
+                            ]
+                        }
+                    ],
+                    "constraints": {
+                        "maxVulnerability": 3,
+                        "maxDeployedServices": 10,
+                        "metricLimits": []
+                    }
+                }
+            }
+        
+        source = next((node for node in topology["nodes"] if node["name"] == source), None)
+        destination = next((node for node in topology["nodes"] if node["name"] == destination), None)
+        slice_input = build_slice_input(source, destination)
+
+        def simulate_slice_output(input_data):
+            """
+            Simulate external PCE response for slice computation.
+            
+            Args:
+                input_data (dict): Input data for slice computation
+                
+            Returns:
+                dict: Simulated slice output with path information
+            """
+            return {
+                "input": input_data,
+                "slice": {
+                    "nodes": [
+                        {"site": 1, "service": 1},
+                        {"site": 2, "service": 2}
+                    ],
+                    "links": [
+                        {
+                            "fromNodeId": 1,
+                            "toNodeId": 2,
+                            "lspId": 500,
+                            "path": {
+                                "ingressNodeId": 1,
+                                "egressNodeId": 2,
+                                "hops": [
+                                    {"nodeId": 3, "linkId": "A-C", "portId": 1},
+                                    {"nodeId": 2, "linkId": "C-B", "portId": 2}
+                                ]
+                            }
+                        }
+                    ],
+                    "metric": {"value": 9}
+                },
+                "error": None
+            }
+        
+        slice_output = simulate_slice_output(slice_input)
+        # Build optimal path from PCE response
+        optimal_path.append(source["name"])
+        for link in slice_output["slice"]["links"]:
+            for hop in link["path"]["hops"]:
+                optimal_path.append(next((node for node in topology["nodes"] if node["nodeId"] == hop['nodeId']), None)["name"])
+    
+    else:
+        logging.debug("Using internal PCE for path planning")
+        ietf_dlos = intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
+        logging.debug(ietf_dlos)
+        
+        # Extract DLOS constraint bounds (energy-related SLO metric bounds)
+        dlos = {
+            "EC": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_consumption"), None),
+            "CE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "carbon_emission"), None),
+            "EE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_efficiency"), None),
+            "URE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "renewable_energy_usage"), None)
+        }
+        logging.debug(f"Planning optimal path from {source} to {destination} with DLOS: {dlos}")
+        optimal_path = calculate_optimal_path(topology, energy_metrics, source, destination, dlos)
+
+    if not optimal_path:
+        logging.error("No valid energy path found")
+        return None
+
+    return optimal_path
+
+
+def retrieve_energy():
+    """
+    Retrieve energy consumption data for network nodes.
+    
+    Returns:
+        dict: Energy metrics including power consumption, carbon emissions,
+              efficiency, and renewable energy usage for each node
+              
+    Notes:
+        TODO: Implement logic to retrieve real-time data from controller
+        Currently reads from static JSON file
+    """
+    with open(os.path.join(SRC_PATH, "planner/energy_planner/energy_ddbb.json"), "r") as archivo:
+        energy_metrics = json.load(archivo)
+    return energy_metrics
+
+
+def retrieve_topology():
+    """
+    Retrieve network topology information.
+    
+    Returns:
+        dict: Network topology with nodes and links
+        
+    Notes:
+        - If PCE_EXTERNAL is True, retrieves topology for external PCE format
+        - Otherwise retrieves topology in internal format
+        TODO: Implement logic to retrieve real-time data from controller
+        Currently reads from static JSON files
+    """
+    if current_app.config["PCE_EXTERNAL"]:
+        # TODO: Implement the logic to retrieve topology data from external PCE
+        # GET /sss/v1/topology/node and /sss/v1/topology/link
+        with open(os.path.join(SRC_PATH, "planner/energy_planner/ext_topo_ddbb.json"), "r") as archivo:
+            topology = json.load(archivo)
+    else:
+        # TODO: Implement the logic to retrieve topology data from controller
+        with open(os.path.join(SRC_PATH, "planner/energy_planner/topo_ddbb.json"), "r") as archivo:
+            topology = json.load(archivo)
+    return topology
+
+
+def calculate_optimal_path(topology, energy_metrics, source, destination, dlos):
+    """
+    Calculate the optimal path using Dijkstra's algorithm with energy constraints.
+    
+    This function implements a constrained shortest path algorithm that considers
+    energy consumption, carbon emissions, energy efficiency, and renewable energy
+    usage as optimization criteria.
+    
+    Args:
+        topology (dict): Network topology with nodes and links
+        energy_metrics (dict): Energy consumption data for each node
+        source (str): Source node identifier
+        destination (str): Destination node identifier
+        dlos (dict): Constraint bounds for:
+                    - EC: Energy Consumption limit
+                    - CE: Carbon Emission limit
+                    - EE: Energy Efficiency limit
+                    - URE: Minimum Renewable Energy Usage
+    
+    Returns:
+        list: Ordered list of node names forming the optimal path,
+              or empty list if no valid path exists
+              
+    Notes:
+        - Uses modified Dijkstra's algorithm with multiple constraints
+        - Paths violating any DLOS constraint are discarded
+        - Node weights computed using compute_node_weight function
+    """
+    logging.debug("Starting optimal path calculation...")
+    
+    # Create a dictionary with the weights of each node
+    node_data_map = {}
+    for node_data in energy_metrics:
+        node_id = node_data["name"]
+        ec = node_data["typical-power"]
+        ce = node_data["carbon-emissions"]
+        ee = node_data["efficiency"]
+        ure = node_data["renewable-energy-usage"]
+
+        total_power_supply = sum(ps["typical-power"] for ps in node_data["power-supply"])
+        total_power_boards = sum(b["typical-power"] for b in node_data["boards"])
+        total_power_components = sum(c["typical-power"] for c in node_data["components"])
+        total_power_transceivers = sum(t["typical-power"] for t in node_data["transceivers"])
+
+        logging.debug(f"Node {node_id}: EC={ec}, CE={ce}, EE={ee}, URE={ure}")
+        logging.debug(f"Node {node_id}: PS={total_power_supply}, BO={total_power_boards}, CO={total_power_components}, TR={total_power_transceivers}")
+
+        weight = compute_node_weight(ec, ce, ee, ure,
+                                            total_power_supply,
+                                            total_power_boards,
+                                            total_power_components,
+                                            total_power_transceivers)
+        logging.debug(f"Weight for node {node_id}: {weight}")
+        
+        node_data_map[node_id] = {
+            "weight": weight,
+            "ec": ec,
+            "ce": ce,
+            "ee": ee,
+            "ure": ure
+        }
+
+    # Create a graph representation of the topology
+    graph = {}
+    for node in topology["ietf-network:networks"]["network"][0]["node"]:
+        graph[node["node-id"]] = []
+    for link in topology["ietf-network:networks"]["network"][0]["link"]:
+        src = link["source"]["source-node"]
+        dst = link["destination"]["dest-node"]
+        graph[src].append((dst, node_data_map[dst]["weight"]))
+        logging.debug(f"Added link: {src} -> {dst} with weight {node_data_map[dst]['weight']}")
+
+    # Dijkstra's algorithm with restrictions
+    # Queue: (accumulated cost, current node, path, sum_ec, sum_ce, sum_ee, min_ure)
+    queue = [(0, source, [], 0, 0, 0, 1)]
+    visited = set()
+
+    logging.debug(f"Starting search from {source} to {destination} with restrictions: {dlos}")
+
+    while queue:
+        cost, node, path, sum_ec, sum_ce, sum_ee, min_ure = heapq.heappop(queue)
+        logging.debug(f"Exploring node {node} with cost {cost} and path {path + [node]}")
+        
+        if node in visited:
+            logging.debug(f"Node {node} already visited, skipped.")
+            continue
+        visited.add(node)
+        path = path + [node]
+
+        node_metrics = node_data_map[node]
+        sum_ec += node_metrics["ec"]
+        sum_ce += node_metrics["ce"]
+        sum_ee += node_metrics["ee"]
+        min_ure = min(min_ure, node_metrics["ure"]) if path[:-1] else node_metrics["ure"]
+
+        logging.debug(f"Accumulated -> EC: {sum_ec}, CE: {sum_ce}, EE: {sum_ee}, URE min: {min_ure}")
+
+        # Check constraint violations
+        if dlos["EC"] is not None and sum_ec > dlos["EC"]:
+            logging.debug(f"Discarded path {path} for exceeding EC ({sum_ec} > {dlos['EC']})")
+            continue
+        if dlos["CE"] is not None and sum_ce > dlos["CE"]:
+            logging.debug(f"Discarded path {path} for exceeding CE ({sum_ce} > {dlos['CE']})")
+            continue
+        if dlos["EE"] is not None and sum_ee > dlos["EE"]:
+            logging.debug(f"Discarded path {path} for exceeding EE ({sum_ee} > {dlos['EE']})")
+            continue
+        if dlos["URE"] is not None and min_ure < dlos["URE"]:
+            logging.debug(f"Discarded path {path} for not reaching minimum URE ({min_ure} < {dlos['URE']})")
+            continue
+
+        if node == destination:
+            logging.debug(f"Destination {destination} reached with a valid path: {path}")
+            return path
+
+        for neighbor, weight in graph.get(node, []):
+            if neighbor not in visited:
+                logging.debug(f"Queue -> neighbour: {neighbor}, weight: {weight}")
+                heapq.heappush(queue, (
+                    cost + weight,
+                    neighbor,
+                    path,
+                    sum_ec,
+                    sum_ce,
+                    sum_ee,
+                    min_ure
+                ))
+    
+    logging.debug("No valid path found that meets the restrictions.")
+    return []
+
+
+def compute_node_weight(ec, ce, ee, ure, total_power_supply, total_power_boards, 
+                       total_power_components, total_power_transceivers, 
+                       alpha=1, beta=1, gamma=1, delta=1):
+    """
+    Calculate node weight based on energy and environmental metrics.
+    
+    Computes a green index that represents the environmental impact of routing
+    traffic through a node, considering power consumption and carbon emissions.
+    
+    Args:
+        ec (float): Base energy consumption of the node
+        ce (float): Carbon emissions factor
+        ee (float): Energy efficiency metric
+        ure (float): Renewable energy usage ratio (0-1)
+        total_power_supply (float): Total power from supply units
+        total_power_boards (float): Total power consumed by boards
+        total_power_components (float): Total power consumed by components
+        total_power_transceivers (float): Total power consumed by transceivers
+        alpha (float, optional): Weight for energy consumption. Defaults to 1
+        beta (float, optional): Weight for carbon emissions. Defaults to 1
+        gamma (float, optional): Weight for energy efficiency. Defaults to 1
+        delta (float, optional): Weight for renewable energy. Defaults to 1
+    
+    Returns:
+        float: Computed green index representing environmental impact
+        
+    Notes:
+        Formula: green_index = (power_idle + power_traffic) * time / 1000 * (1 - ure) * ce
+        - Assumes 100 units of traffic
+        - Measured over 1 hour time period
+    """
+    traffic = 100
+    # Measure one hour of traffic
+    time = 1
+
+    power_idle = ec + total_power_supply + total_power_boards + total_power_components + total_power_transceivers
+    power_traffic = traffic * ee
+
+    power_total = (power_idle + power_traffic)
+
+    green_index = power_total * time / 1000 * (1 - ure) * ce
+
+    return green_index
\ No newline at end of file
diff --git a/src/planner/energy_ddbb.json b/src/planner/energy_planner/energy_ddbb.json
similarity index 100%
rename from src/planner/energy_ddbb.json
rename to src/planner/energy_planner/energy_ddbb.json
diff --git a/src/planner/ext_topo_ddbb.json b/src/planner/energy_planner/ext_topo_ddbb.json
similarity index 100%
rename from src/planner/ext_topo_ddbb.json
rename to src/planner/energy_planner/ext_topo_ddbb.json
diff --git a/src/planner/topo_ddbb.json b/src/planner/energy_planner/topo_ddbb.json
similarity index 100%
rename from src/planner/topo_ddbb.json
rename to src/planner/energy_planner/topo_ddbb.json
diff --git a/src/planner/hrat_planner/hrat.py b/src/planner/hrat_planner/hrat.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b370d7430016c0e4c2effff8de6bbbbd87f8bd2
--- /dev/null
+++ b/src/planner/hrat_planner/hrat.py
@@ -0,0 +1,85 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, requests
+
+def hrat_planner(data: str, ip: str, action: str = "create") -> dict:
+    """
+    Interface with the HRAT (Hierarchical Resource Allocation Tool) for transport network slice management.
+    
+    This function communicates with an external HRAT service to create or delete
+    transport network slices, handling optical layer provisioning and IP layer
+    configuration.
+    
+    Args:
+        data (str or dict): Network slice UUID for deletion, or full intent data for creation
+        ip (str): IP address of the HRAT service
+        action (str, optional): Operation to perform - "create" or "delete". Defaults to "create"
+    
+    Returns:
+        dict: Response from HRAT service containing:
+            - network-slice-uuid: Unique identifier for the slice
+            - viability: Boolean indicating if slice is viable
+            - actions: List of configuration actions including:
+                * CREATE_OPTICAL_SLICE
+                * PROVISION_MEDIA_CHANNEL_OLS_PATH
+                * ACTIVATE_TRANSCEIVER
+                * CONFIG_VPNL3
+              
+    Notes:
+        - On timeout or connection errors, returns static fallback data
+        - HRAT service expected at port 9090
+        - Timeout set to 1 second for all requests
+        
+    Raises:
+        requests.exceptions.RequestException: On HTTP request failures (logged, not raised)
+    """
+    data_static = {'network-slice-uuid': 'ecoc25-short-path-a7764e55-9bdb-4e38-9386-02ff47a33225', 'viability': True, 'actions': [{'type': 'CREATE_OPTICAL_SLICE', 'layer': 'OPTICAL', 'content': {'tenant-uuid': 'ea4ade23-1444-4f93-aabc-4fcbe2ae74dd', 'service-interface-point': [{'uuid': 'e7444187-119b-5b2e-8a60-ee26b30c441a'}, {'uuid': 'b32b1623-1f64-59d2-8148-b035a8f77625'}], 'node': [{'uuid': '68eb48ac-b686-5653-bdaf-7ccaeecd0709', 'owned-node-edge-point': [{'uuid': '7fd74b80-2b5a-55e2-8ef7-82bf589c9591', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}, {'uuid': '7b9f0b65-2387-5352-bc36-7173639463f0', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}]}, {'uuid': 'f55351ce-a5c8-50a7-b506-95b40e08bce4', 'owned-node-edge-point': [{'uuid': 'da6d924d-9cb4-5add-817d-f83e910beb2e', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}, {'uuid': '577ec899-ad92-5a19-a140-405a3cdbaa17', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}]}], 'link': [{'uuid': '3beef785-bb26-5741-af10-c5e1838c1701'}, {'uuid': '6144c664-246a-58ed-bf0a-7ec4286625da'}]}, 'controller-uuid': 'TAPI Optical Controller'}, {'type': 'PROVISION_MEDIA_CHANNEL_OLS_PATH', 'layer': 'OPTICAL', 'content': {'ols-path-uuid': 'cfeae4cb-c305-4884-9945-8b0c0f040c98', 'src-sip-uuid': 'e7444187-119b-5b2e-8a60-ee26b30c441a', 'dest-sip-uuid': 'b32b1623-1f64-59d2-8148-b035a8f77625', 
'direction': 'BIDIRECTIONAL', 'layer-protocol-name': 'PHOTONIC_MEDIA', 'layer-protocol-qualifier': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_MC', 'bandwidth-ghz': 100, 'link-uuid-path': ['3beef785-bb26-5741-af10-c5e1838c1701'], 'lower-frequency-mhz': '194700000', 'upper-frequency-mhz': '194800000', 'adjustment-granularity': 'G_6_25GHZ', 'grid-type': 'FLEX'}, 'controller-uuid': 'TAPI Optical Controller', 'tenant-uuid': 'ea4ade23-1444-4f93-aabc-4fcbe2ae74dd'}, {'type': 'ACTIVATE_TRANSCEIVER', 'layer': 'OPTICAL', 'content': {'node-uuid': 'Phoenix-1', 'termination-point-uuid': 'Ethernet110', 'transceiver-type': 'CFP2', 'frequency-ghz': 194700.0, 'spectrum-width-ghz': 100.0, 'tx-power-dbm': 0.0}, 'controller-uuid': 'IP Controller'}, {'type': 'ACTIVATE_TRANSCEIVER', 'layer': 'OPTICAL', 'content': {'node-uuid': 'Phoenix-2', 'termination-point-uuid': 'Ethernet220', 'transceiver-type': 'CFP2', 'frequency-ghz': 194700.0, 'spectrum-width-ghz': 100.0, 'tx-power-dbm': 0.0}, 'controller-uuid': 'IP Controller'}, {'type': 'CONFIG_VPNL3', 'layer': 'IP', 'content': {'tunnel-uuid': '9aae851a-eea9-4a28-969f-0e2c2196e936', 'src-node-uuid': 'Phoenix-1', 'src-ip-address': '10.10.1.1', 'src-ip-mask': '/24', 'src-vlan-id': 100, 'dest-node-uuid': 'Phoenix-2', 'dest-ip-address': '10.10.2.1', 'dest-ip-mask': '/24', 'dest-vlan-id': 100}, 'controller-uuid': 'IP Controller'}]}
+    url = f'http://{ip}:9090/api/resource-allocation/transport-network-slice-l3'
+    headers = {'Content-Type': 'application/json'}
+
+    try:
+        if action == "delete":
+            # Build deletion payload with slice ID
+            payload = {
+                "ietf-network-slice-service:network-slice-services": {
+                    "slice-service": [
+                        {
+                            "id": data
+                        }
+                    ]
+                }
+            }
+            response = requests.delete(url, headers=headers, json=payload, timeout=1)
+        elif action == "create":
+            response = requests.post(url, headers=headers, json=data, timeout=1)
+        else:
+            logging.error("Invalid action. Use 'create' or 'delete'.")
+            return data_static
+        
+        if response.ok:
+            return response.json()
+        else:
+            logging.error(f"Request failed with status code {response.status_code}: {response.text}")
+            return data_static
+
+    except requests.exceptions.RequestException as e:
+        logging.error(f"HTTP request failed: {e}. Returning default data")
+        return data_static
+    except Exception as e:
+        logging.error(f"Unexpected error: {e}")
+        return data_static
+
diff --git a/src/planner/planner.py b/src/planner/planner.py
index b5fb1ba1ee624fcc090d4c85dfc252f2463ac042..979783463ffc29e21e6edd2a7fe88864537081cd 100644
--- a/src/planner/planner.py
+++ b/src/planner/planner.py
@@ -14,273 +14,40 @@
 
 # This file is an original contribution from Telefonica Innovación Digital S.L.
 
-import logging, random, os, json, heapq  
-from src.Constants import SRC_PATH, PCE_EXTERNAL, DEFAULT_LOGGING_LEVEL
+import logging
+from src.planner.energy_planner.energy           import energy_planner
+from src.planner.hrat_planner.hrat               import hrat_planner
+from src.planner.tfs_optical_planner.tfs_optical import tfs_optical_planner
+from flask import current_app
 
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
-    level=DEFAULT_LOGGING_LEVEL,
-    format='%(levelname)s - %(message)s')
 
 class Planner:
+    """
+    Planner class to compute optimal paths for network slices.
+    Uses different strategies based on configuration.
+    """
     """
     Planner class to compute the optimal path for a network slice based on energy consumption and topology.
     """
 
-    def planner(self, intent):
+    def planner(self, intent, type):
         """
         Plan the optimal path for a network slice based on energy consumption and topology.
-        """
-        energy_metrics = self.__retrieve_energy()
-        topology = self.__retrieve_topology()
-        source = intent.get("ietf-network-slice-service:network-slice-services", {}).get("slice-service", [])[0].get("sdps", {}).get("sdp", [])[0].get("id") or "A"
-        destination = intent.get("ietf-network-slice-service:network-slice-services", {}).get("slice-service", [])[0].get("sdps", {}).get("sdp", [])[1].get("id") or "B"
-        optimal_path = []
-        # If using an external PCE
-        if PCE_EXTERNAL:
-            logging.info("Using external PCE for path planning")    
-            def build_slice_input(node_source, node_destination):
-                return {
-                    "clientName": "demo-client",
-                    "requestId": random.randint(1000, 9999),
-                    "sites": [node_source["nodeId"], node_destination["nodeId"]],
-                    "graph": {
-                        "nodes": [
-                            {
-                                "nodeId": node_source["nodeId"],
-                                "name": node_source["name"],
-                                "footprint": node_source["footprint"],
-                                "sticky": [node_source["nodeId"]]
-                            },
-                            {
-                                "nodeId": node_destination["nodeId"],
-                                "name": node_destination["name"],
-                                "footprint": node_destination["footprint"],
-                                "sticky": [node_destination["nodeId"]]
-                            }
-                        ],
-                        "links": [
-                            {
-                                "fromNodeId": node_source["nodeId"],
-                                "toNodeId": node_destination["nodeId"],
-                                "bandwidth": 1000000000,
-                                "metrics": [
-                                    {
-                                        "metric": "DELAY",
-                                        "value": 10,
-                                        "bound": True,
-                                        "required": True
-                                    }
-                                ]
-                            }
-                        ],
-                        "constraints": {
-                            "maxVulnerability": 3,
-                            "maxDeployedServices": 10,
-                            "metricLimits": []
-                        }
-                    }
-                }
-            source = next((node for node in topology["nodes"] if node["name"] == source), None)
-            destination = next((node for node in topology["nodes"] if node["name"] == destination), None)
-            slice_input = build_slice_input(source, destination)
-
-            # POST /sss/v1/slice/compute
-            def simulate_slice_output(input_data):
-                return {
-                    "input": input_data,
-                    "slice": {
-                        "nodes": [
-                            {"site": 1, "service": 1},
-                            {"site": 2, "service": 2}
-                        ],
-                        "links": [
-                            {
-                                "fromNodeId": 1,
-                                "toNodeId": 2,
-                                "lspId": 500,
-                                "path": {
-                                    "ingressNodeId": 1,
-                                    "egressNodeId": 2,
-                                    "hops": [
-                                        {"nodeId": 3, "linkId": "A-C", "portId": 1},
-                                        {"nodeId": 2, "linkId": "C-B", "portId": 2}
-                                    ]
-                                }
-                            }
-                        ],
-                        "metric": {"value": 9}
-                    },
-                    "error": None
-                }
-            slice_output = simulate_slice_output(slice_input)
-            # Mostrar resultado
-            optimal_path.append(source["name"])
-            for link in slice_output["slice"]["links"]:
-                for hop in link["path"]["hops"]:
-                    optimal_path.append(next((node for node in topology["nodes"] if node["nodeId"] == hop['nodeId']), None)["name"])
-        
-        else:
-            logging.info("Using internal PCE for path planning")
-            ietf_dlos = intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
-            logging.info(ietf_dlos),
-            # Solo asigna los DLOS que existan, el resto a None
-            dlos = {
-                "EC": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_consumption"), None),
-                "CE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "carbon_emission"), None),
-                "EE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_efficiency"), None),
-                "URE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "renewable_energy_usage"), None)
-            }
-            logging.debug(f"Planning optimal path from {source} to {destination} with DLOS: {dlos}")
-            optimal_path = self.__calculate_optimal_path(topology, energy_metrics, source, destination, dlos)
-
-        if not optimal_path:
-            logging.error("No valid path found")
-            raise Exception("No valid energy path found")
-
-        return optimal_path
-
-    def __retrieve_energy(self):
-        # TODO : Implement the logic to retrieve energy consumption data from controller
-        # Taking it from static file
-        with open(os.path.join(SRC_PATH, "planner/energy_ddbb.json"), "r") as archivo:
-            energy_metrics = json.load(archivo)
-        return energy_metrics
-
-    def __retrieve_topology(self):
-        if PCE_EXTERNAL:
-            # TODO : Implement the logic to retrieve topology data from external PCE
-            # GET /sss/v1/topology/node and /sss/v1/topology/link
-            with open(os.path.join(SRC_PATH, "planner/ext_topo_ddbb.json"), "r") as archivo:
-                topology = json.load(archivo)
-        else:
-            # TODO : Implement the logic to retrieve topology data from controller
-            # Taking it from static file
-            with open(os.path.join(SRC_PATH, "planner/topo_ddbb.json"), "r") as archivo:
-                topology = json.load(archivo)
-        return topology
-
-
-
-    def __calculate_optimal_path(self, topology, energy_metrics, source, destination, dlos):
-        logging.debug("Starting optimal path calculation...")
-        
-        # Create a dictionary with the weights of each node
-        node_data_map = {}
-        for node_data in energy_metrics:
-            node_id = node_data["name"]
-            ec = node_data["typical-power"]
-            ce = node_data["carbon-emissions"]
-            ee = node_data["efficiency"]
-            ure = node_data["renewable-energy-usage"]
-
-            total_power_supply = sum(ps["typical-power"] for ps in node_data["power-supply"])
-            total_power_boards = sum(b["typical-power"] for b in node_data["boards"])
-            total_power_components = sum(c["typical-power"] for c in node_data["components"])
-            total_power_transceivers = sum(t["typical-power"] for t in node_data["transceivers"])
-
-            logging.debug(f"Node {node_id}: EC={ec}, CE={ce}, EE={ee}, URE={ure}")
-            logging.debug(f"Node {node_id}: PS={total_power_supply}, BO={total_power_boards}, CO={total_power_components}, TR={total_power_transceivers}")
-
-            weight = self.__compute_node_weight(ec, ce, ee, ure,
-                                                total_power_supply,
-                                                total_power_boards,
-                                                total_power_components,
-                                                total_power_transceivers)
-            logging.debug(f"Weight for node {node_id}: {weight}")
-            
-            node_data_map[node_id] = {
-                "weight": weight,
-                "ec": ec,
-                "ce": ce,
-                "ee": ee,
-                "ure": ure
-            }
-
-        # Create a graph representation of the topology
-        graph = {}
-        for node in topology["ietf-network:networks"]["network"][0]["node"]:
-            graph[node["node-id"]] = []
-        for link in topology["ietf-network:networks"]["network"][0]["link"]:
-            src = link["source"]["source-node"]
-            dst = link["destination"]["dest-node"]
-            graph[src].append((dst, node_data_map[dst]["weight"]))
-            logging.debug(f"Added link: {src} -> {dst} with weight {node_data_map[dst]['weight']}")
 
-        # Dijkstra's algorithm with restrictions
-        queue = [(0, source, [], 0, 0, 0, 1)]  # (accumulated cost, current node, path, sum_ec, sum_ce, sum_ee, min_ure)
-        visited = set()
+        Args:
+            intent (dict): Network slice intent
+            type (str): Planner type (ENERGY, HRAT, TFS_OPTICAL)
 
-        logging.debug(f"Starting search from {source} to {destination} with restrictions: {dlos}")
-        
-
-        while queue:
-            cost, node, path, sum_ec, sum_ce, sum_ee, min_ure = heapq.heappop(queue)
-            logging.debug(f"Exploring node {node} with cost {cost} and path {path + [node]}")
-            
-            if node in visited:
-                logging.debug(f"Node {node} already visited, skipped.")
-                continue
-            visited.add(node)
-            path = path + [node]
-
-            node_metrics = node_data_map[node]
-            sum_ec += node_metrics["ec"]
-            sum_ce += node_metrics["ce"]
-            sum_ee += node_metrics["ee"]
-            min_ure = min(min_ure, node_metrics["ure"]) if path[:-1] else node_metrics["ure"]
-
-            logging.debug(f"Accumulated -> EC: {sum_ec}, CE: {sum_ce}, EE: {sum_ee}, URE min: {min_ure}")
-
-            if dlos["EC"] is not None and sum_ec > dlos["EC"]:
-                logging.debug(f"Discarded path {path} for exceeding EC ({sum_ec} > {dlos['EC']})")
-                continue
-            if dlos["CE"] is not None and sum_ce > dlos["CE"]:
-                logging.debug(f"Discarded path {path} for exceeding CE ({sum_ce} > {dlos['CE']})")
-                continue
-            if dlos["EE"] is not None and sum_ee > dlos["EE"]:
-                logging.debug(f"Discarded path {path} for exceeding EE ({sum_ee} > {dlos['EE']})")
-                continue
-            if dlos["URE"] is not None and min_ure < dlos["URE"]:
-                logging.debug(f"Discarded path {path} for not reaching minimum URE ({min_ure} < {dlos['URE']})")
-                continue
-
-            if node == destination:
-                logging.debug(f"Destination {destination} reached with a valid path: {path}")
-                return path
-
-            for neighbor, weight in graph.get(node, []):
-                if neighbor not in visited:
-                    logging.debug(f"Qeue -> neighbour: {neighbor}, weight: {weight}")
-                    heapq.heappush(queue, (
-                        cost + weight,
-                        neighbor,
-                        path,
-                        sum_ec,
-                        sum_ce,
-                        sum_ee,
-                        min_ure
-                    ))
-        logging.debug("No valid path found that meets the restrictions.")
-        return []
-
-
-    def __compute_node_weight(self, ec, ce, ee, ure, total_power_supply, total_power_boards, total_power_components, total_power_transceivers, alpha=1, beta=1, gamma=1, delta=1):
-        """
-        Calcula el peso de un nodo con la fórmula:
-        w(v) = α·EC + β·CE + γ/EE + δ·(1 - URE)
+        Returns:
+            dict or None: Planner result or None if type is invalid
         """
-        traffic = 100
-        # Measure one hour of traffic
-        time = 1
-
-        power_idle = ec + total_power_supply + total_power_boards + total_power_components + total_power_transceivers
-        power_traffic = traffic * ee
-
-        power_total = (power_idle + power_traffic)
-
-        green_index = power_total * time / 1000 * (1 - ure) * ce
-
-        return green_index 
-    
-
+        # Log selected planner type
+        logging.info(f"Planner type selected: {type}")
+        # Use energy planner strategy
+        if type   == "ENERGY"     : return energy_planner(intent)
+        # Use HRAT planner with configured IP
+        elif type == "HRAT"       : return hrat_planner(intent, current_app.config["HRAT_IP"])
+        # Use TFS optical planner with configured IP
+        elif type == "TFS_OPTICAL": return tfs_optical_planner(intent, current_app.config["OPTICAL_PLANNER_IP"], action = "create")
+        # Return None if planner type is unsupported
+        else : return None
diff --git a/src/planner/tfs_optical_planner/tfs_optical.py b/src/planner/tfs_optical_planner/tfs_optical.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d5bc79a76f9765ce6f2826f0e6b5d2a0f6a346c
--- /dev/null
+++ b/src/planner/tfs_optical_planner/tfs_optical.py
@@ -0,0 +1,393 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+import requests
+import os
+import uuid
+import json
+from src.config.constants import TEMPLATES_PATH
+from src.utils.safe_get import safe_get
+
+
+def tfs_optical_planner(intent, ip: str, action: str = "create") -> dict:
+    """
+    Plan optical layer configuration for TeraFlow SDN network slices.
+    
+    This function computes optical paths and generates configuration rules for
+    point-to-multipoint (P2MP) optical connections, including transceiver
+    activation and Layer 3 VPN configuration.
+    
+    Args:
+        intent (dict or str): For create action - network slice intent with service
+                             delivery points. For delete action - slice ID string
+        ip (str): IP address of the optical path computation service
+        action (str, optional): Operation to perform - "create" or "delete". 
+                               Defaults to "create"
+    
+    Returns:
+        dict or None: Configuration rules containing:
+            - network-slice-uuid: Unique identifier
+            - viability: Boolean indicating success
+            - actions: List of provisioning actions for:
+                * XR_AGENT_ACTIVATE_TRANSCEIVER (optical layer)
+                * CONFIG_VPNL3 (IP layer)
+              Returns None if source/destination not found or service unavailable
+              
+    Notes:
+        - Supports P2MP (Point-to-Multipoint) connectivity
+        - Computes optical paths using external TFS optical service
+        - Configures digital subcarrier groups for wavelength division
+        - Port 31060 used for optical path computation API
+        
+    Raises:
+        requests.exceptions.RequestException: On connection errors (logged, returns None)
+    """
+    if action == 'delete':
+        logging.debug("DELETE REQUEST RECEIVED: %s", intent)
+        # Load slice database to retrieve intent for deletion
+        with open(os.path.join(TEMPLATES_PATH, "slice.db"), 'r', encoding='utf-8') as file:
+            slices = json.load(file)
+
+        for slice_obj in slices:
+            if 'slice_id' in slice_obj and slice_obj['slice_id'] == intent:
+                logging.debug("Slice found: %s", slice_obj['slice_id'])
+                source = None
+                destination = None
+                services = slice_obj['intent']['ietf-network-slice-service:network-slice-services']['slice-service']
+                
+                # Extract source and destination from P2MP structure
+                for service in services:
+                    c_groups = service.get("connection-groups", {}).get("connection-group", [])
+                    for cg in c_groups:
+                        constructs = cg.get("connectivity-construct", [])
+                        for construct in constructs:
+                            if "p2mp-sdp" in construct:
+                                source = construct["p2mp-sdp"]["root-sdp-id"]
+                                destination = construct["p2mp-sdp"]["leaf-sdp-id"]
+                                break
+                        if source and destination:
+                            break
+                            
+                response = send_request(source, destination, ip)
+                summary = {
+                    "source": source,
+                    "destination": destination,
+                    "connectivity-service": response
+                }
+                rules = generate_rules(summary, intent, action)
+    else:
+        # Extract source and destination from creation intent
+        services = intent["ietf-network-slice-service:network-slice-services"]["slice-service"]
+        source = None
+        destination = None
+        
+        for service in services:
+            c_groups = service.get("connection-groups", {}).get("connection-group", [])
+            for cg in c_groups:
+                constructs = cg.get("connectivity-construct", [])
+                for construct in constructs:
+                    source = safe_get(construct, ["p2mp-sdp", "root-sdp-id"])
+                    destination = safe_get(construct, ["p2mp-sdp", "leaf-sdp-id"])
+                    if source and destination:
+                        break
+                if source and destination:
+                    break
+                    
+        response = None
+        if source and destination:
+            response = send_request(source, destination, ip)
+            if not response:
+                return None
+            summary = {
+                "source": source,
+                "destination": destination,
+                "connectivity-service": response
+            }
+            logging.debug(summary)
+            rules = generate_rules(summary, intent, action)
+        else:
+            logging.warning(f"No rules generated. Skipping optical planning.")
+            return None
+            
+    return rules
+
+
+def send_request(source, destination, ip):
+    """
+    Send path computation request to the optical TFS service.
+    
+    Computes point-to-multipoint optical paths using the TAPI path computation API.
+    
+    Args:
+        source (str or list): Root node identifier(s) for P2MP path
+        destination (str or list): Leaf node identifier(s) for P2MP path
+        ip (str): IP address of the TFS optical service
+    
+    Returns:
+        dict or None: Path computation response containing connectivity service
+                     with optical connection attributes, or None on failure
+                     
+    Notes:
+        - API endpoint: POST /OpticalTFS/restconf/operations/tapi-path-computation:compute-p2mp
+        - Assumes 100 Gbps bitrate, bidirectional transmission
+        - Band width of 200, with 4 subcarriers per source
+        - 1 second timeout for requests
+    """
+    url = f"http://{ip}:31060/OpticalTFS/restconf/operations/tapi-path-computation:compute-p2mp"
+
+    headers = {
+        "Content-Type": "application/json",
+        "Accept": "*/*"
+    }
+
+    # Normalize source and destination to lists
+    if isinstance(source, str):
+        sources_list = [source]
+    else:
+        sources_list = list(source)
+
+    if isinstance(destination, str):
+        destinations_list = [destination]
+    else:
+        destinations_list = list(destination)
+
+    payload = {
+        "sources": sources_list,
+        "destinations": destinations_list,
+        "bitrate": 100,
+        "bidirectional": True,
+        "band": 200,
+        "subcarriers_per_source": [4] * len(sources_list)
+    }
+    logging.debug(f"Payload for path computation: {json.dumps(payload, indent=2)}")
+
+    try:
+        response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=1)
+        return json.loads(response.text)
+    except requests.exceptions.RequestException:
+        logging.warning("Error connecting to the Optical Planner service. Skipping optical planning.")
+        return None
+
+
+def group_block(group, action, group_id_override=None, node=None):
+    """
+    Generate a digital subcarrier group configuration block.
+    
+    Creates configuration for optical digital subcarriers, which are used for
+    wavelength division multiplexing in optical networks.
+    
+    Args:
+        group (dict): Subcarrier group data from path computation response
+        action (str): "create" to activate, "delete" to deactivate
+        group_id_override (int, optional): Override group ID. Defaults to None
+        node (str, optional): Node type - "leaf" for simplified config. Defaults to None
+    
+    Returns:
+        dict: Digital subcarrier group configuration with:
+            - digital_sub_carriers_group_id: Group identifier
+            - digital_sub_carrier_id: List of subcarrier configs with active status
+            
+    Notes:
+        - Leaf nodes use fixed 4 subcarriers (IDs 1-4)
+        - Non-leaf nodes use subcarrier IDs from computation response
+    """
+    active = "true" if action == 'create' else "false"
+    group_id = group_id_override if group_id_override is not None else group["digital_sub_carriers_group_id"]
+    
+    if node == "leaf":
+        # Simplified configuration for leaf nodes
+        return {
+            "digital_sub_carriers_group_id": group_id,
+            "digital_sub_carrier_id": [
+                {'sub_carrier_id': 1, 'active': active},
+                {'sub_carrier_id': 2, 'active': active},
+                {'sub_carrier_id': 3, 'active': active},
+                {'sub_carrier_id': 4, 'active': active}
+            ]
+        }
+    else:
+        # Full configuration based on computed path
+        return {
+            "digital_sub_carriers_group_id": group_id,
+            "digital_sub_carrier_id": [
+                {
+                    "sub_carrier_id": sid,
+                    "active": active,
+                }
+                for sid in group["subcarrier-id"]
+            ]
+        }
+
+
+def generate_rules(connectivity_service, intent, action):
+    """
+    Generate provisioning rules for optical and IP layer configuration.
+    
+    Transforms path computation results into concrete configuration actions
+    for transceivers and Layer 3 VPN setup.
+    
+    Args:
+        connectivity_service (dict): Path computation summary containing:
+            - source: Root node identifier
+            - destination: List of leaf node identifiers
+            - connectivity-service: Optical connection attributes
+        intent (dict): Original network slice intent with IP configuration
+        action (str): "create" or "delete" operation
+    
+    Returns:
+        list: Configuration rules with provisioning actions
+        
+    Notes:
+        - For create: Generates XR_AGENT_ACTIVATE_TRANSCEIVER and CONFIG_VPNL3 actions
+        - For delete: Generates DEACTIVATE_XR_AGENT_TRANSCEIVER actions
+        - Hub node uses channel-1 at 195000000 MHz
+        - Leaf nodes assigned specific channels (channel-1, channel-3, channel-5)
+        - Fixed VLAN ID of 500 for all connections
+        - Tunnel UUID generated from source-destination string
+    """
+    src_name = connectivity_service.get("source", "FALTA VALOR")
+    dest_list = connectivity_service.get("destination", ["FALTA VALOR"])
+    dest_str = ",".join(dest_list)
+    config_rules = []
+
+    # Generate deterministic UUID for tunnel based on endpoints
+    network_slice_uuid_str = f"{src_name}_to_{dest_str}"
+    tunnel_uuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, network_slice_uuid_str))
+    
+    provisionamiento = {
+        "network-slice-uuid": network_slice_uuid_str,
+        "viability": True,
+        "actions": []
+    }
+
+    # Extract optical connection attributes from path computation
+    attributes = connectivity_service["connectivity-service"]["tapi-connectivity:connectivity-service"]["connection"][0]["optical-connection-attributes"]
+    groups = attributes["subcarrier-attributes"]["digital-subcarrier-group"]
+    operational_mode = attributes["modulation"]["operational-mode"]
+    
+    # Build hub (root) configuration with all subcarrier groups
+    hub_groups = [
+        group_block(group, action, group_id_override=index + 1)
+        for index, group in enumerate(groups)
+    ]
+    hub = {
+        "name": "channel-1",
+        "frequency": 195000000,
+        "target_output_power": 0,
+        "operational_mode": operational_mode,
+        "operation": "merge",
+        "digital_sub_carriers_group": hub_groups
+    }
+
+    # Build leaf configurations with specific frequencies per destination
+    leaves = []
+    for dest, group in zip(connectivity_service["destination"], groups):
+        # Map destinations to specific channels and frequencies
+        if dest == "T1.1":
+            name = "channel-1"
+            freq = 195006250
+        if dest == "T1.2":
+            name = "channel-3"
+            freq = 195018750
+        if dest == "T1.3":
+            name = "channel-5"
+            freq = 195031250
+            
+        leaf = {
+            "name": name,
+            "frequency": freq,
+            "target_output_power": group["Tx-power"],
+            "operational_mode": int(group["operational-mode"]),
+            "operation": "merge",
+            "digital_sub_carriers_group": [group_block(group, action, group_id_override=1, node="leaf")]
+        }
+        leaves.append(leaf)
+
+    final_json = {"components": [hub] + leaves}
+    
+    if action == 'create':
+        # Add transceiver activation action
+        provisionamiento["actions"].append({
+            "type": "XR_AGENT_ACTIVATE_TRANSCEIVER",
+            "layer": "OPTICAL",
+            "content": final_json,
+            "controller-uuid": "IPoWDM Controller"
+        })
+
+        # Extract IP configuration from intent for L3 VPN setup
+        nodes = {}
+        sdp_list = intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]['sdps']['sdp']
+
+        for sdp in sdp_list:
+            node = sdp['node-id']
+            attachments = sdp['attachment-circuits']['attachment-circuit']
+            for ac in attachments:
+                ip = ac.get('ac-ipv4-address', None)
+                prefix = ac.get('ac-ipv4-prefix-length', None)
+                vlan = 500  # Fixed VLAN ID
+                nodes[node] = {
+                    "ip-address": ip,
+                    "ip-mask": prefix,
+                    "vlan-id": vlan
+                }
+
+        # Add L3 VPN configuration action for P2MP topology
+        provisionamiento["actions"].append({
+            "type": "CONFIG_VPNL3",
+            "layer": "IP",
+            "content": {
+                "tunnel-uuid": tunnel_uuid,
+                "src-node-uuid": src_name,
+                "src-ip-address": nodes[src_name]["ip-address"],
+                "src-ip-mask": str(nodes[src_name]["ip-mask"]),
+                "src-vlan-id": nodes[src_name]["vlan-id"],
+                "dest1-node-uuid": dest_list[0],
+                "dest1-ip-address": nodes[dest_list[0]]["ip-address"],
+                "dest1-ip-mask": str(nodes[dest_list[0]]["ip-mask"]),
+                "dest1-vlan-id": nodes[dest_list[0]]["vlan-id"],
+                "dest2-node-uuid": dest_list[1],
+                "dest2-ip-address": nodes[dest_list[1]]["ip-address"],
+                "dest2-ip-mask": str(nodes[dest_list[1]]["ip-mask"]),
+                "dest2-vlan-id": nodes[dest_list[1]]["vlan-id"],
+                "dest3-node-uuid": dest_list[2],
+                "dest3-ip-address": nodes[dest_list[2]]["ip-address"],
+                "dest3-ip-mask": str(nodes[dest_list[2]]["ip-mask"]),
+                "dest3-vlan-id": nodes[dest_list[2]]["vlan-id"]
+            },
+            "controller-uuid": "IP Controller"
+        })
+
+        config_rules.append(provisionamiento)
+    else:
+        # For deletion, generate deactivation action
+        nodes = []
+        nodes.append(src_name)
+        for dst in dest_list:
+            nodes.append(dst)
+        aux = tunnel_uuid + '-' + src_name + '-' + '-'.join(dest_list)
+        
+        provisionamiento["actions"].append({
+            "type": "DEACTIVATE_XR_AGENT_TRANSCEIVER",
+            "layer": "OPTICAL",
+            "content": final_json,
+            "controller-uuid": "IPoWDM Controller",
+            "uuid": aux,
+            "nodes": nodes
+        })
+        config_rules.append(provisionamiento)
+
+    return config_rules
\ No newline at end of file
diff --git a/src/realizer/e2e/e2e_connect.py b/src/realizer/e2e/e2e_connect.py
new file mode 100644
index 0000000000000000000000000000000000000000..368ac3df1dc5c9809500f601f369705aefd828a4
--- /dev/null
+++ b/src/realizer/e2e/e2e_connect.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from ..tfs.helpers.tfs_connector import tfs_connector
+
+def e2e_connect(requests, controller_ip):
+    """ 
+    Function to connect end-to-end services in TeraFlowSDN (TFS) controller.
+    
+    Args:
+        requests (list): List of requests to be sent to the TFS e2e controller.
+        controller_ip (str): IP address of the TFS e2e controller.
+    """
+    response = tfs_connector().webui_post(controller_ip, requests)
+    return response
\ No newline at end of file
diff --git a/src/realizer/e2e/main.py b/src/realizer/e2e/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eed88389d428755c5b124a59383827de53d9259
--- /dev/null
+++ b/src/realizer/e2e/main.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .service_types.del_l3ipowdm_slice import del_l3ipowdm_slice
+from .service_types.l3ipowdm_slice import l3ipowdm_slice
+
+def e2e(ietf_intent, way=None, response=None, rules = None):
+    logging.debug(f"E2E Realizer selected: {way}")
+    if   way == "L3oWDM":     realizing_request = l3ipowdm_slice(rules)
+    elif way == "DEL_L3oWDM": realizing_request = del_l3ipowdm_slice(rules, response)
+    else:
+        logging.warning(f"Unsupported way: {way}.")
+        realizing_request = None
+    return realizing_request
\ No newline at end of file
diff --git a/src/realizer/e2e/service_types/del_l3ipowdm_slice.py b/src/realizer/e2e/service_types/del_l3ipowdm_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bbeb13c4fe8b58f2e3afb4ed5a22bfb02cb286c
--- /dev/null
+++ b/src/realizer/e2e/service_types/del_l3ipowdm_slice.py
@@ -0,0 +1,177 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH, NBI_L2_PATH
+from src.utils.load_template import load_template
+from flask import current_app
+
+def del_l3ipowdm_slice(ietf_intent, response):
+    """
+    Build the TeraFlow request used on the delete path of an L3-over-WDM slice.
+
+    Depending on the Flask app's UPLOAD_TYPE config, prepares either:
+    - "WEBUI": a TFS L2VPN service request populated from a JSON template, or
+    - "NBI": an IETF L2VPN service request targeting the NBI L2 path.
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        response (list): Stored slice descriptors; the entry whose "id" matches
+            the intent's slice id supplies the QoS "requirements".
+
+    Returns:
+        dict: A TeraFlow service request for L2VPN configuration.
+
+    """
+    # Hardcoded router endpoints
+    # TODO (should be dynamically determined)
+    origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
+    origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
+    destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    # NOTE(review): `id` and `slice` shadow Python builtins; consider renaming.
+    id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+    # Matching stored descriptor, or None when no entry carries this id.
+    slice = next((d for d in response if d.get("id") == id), None)
+
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        # Load L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "L2-VPN_template_empty.json"))["services"][0]
+
+        # Configure service UUID
+        tfs_request["service_id"]["service_uuid"]["uuid"] = ietf_intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]["id"]
+
+        # Configure service endpoints (first list entry = origin, rest = destination)
+        for endpoint in tfs_request["service_endpoint_ids"]:
+            endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
+            endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
+
+        # Add service constraints
+        # NOTE(review): raises AttributeError when no stored slice matched
+        # (`slice` is None) — confirm the caller guarantees a match.
+        for constraint in slice.get("requirements", []):
+            tfs_request["service_constraints"].append({"custom": constraint})
+
+        # Add configuration rules (skips rule 0; rule 1 = origin, rule 2 = destination)
+        for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
+            router_id = origin_router_id if i == 1 else destination_router_id
+            router_if = origin_router_if if i == 1 else destination_router_if
+            resource_value = config_rule["custom"]["resource_value"]
+
+            sdp_index = i - 1
+            vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
+            if vlan_value:
+                resource_value["vlan_id"] = int(vlan_value)
+            resource_value["circuit_id"] = vlan_value
+            resource_value["remote_router"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
+            config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
+
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        #self.path = NBI_L2_PATH
+        # Load IETF L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "ietfL2VPN_template_empty.json"))
+
+        # Add path to the request
+        tfs_request["path"] = NBI_L2_PATH
+
+        # Generate service UUID
+        full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        uuid_only = full_id.split("slice-service-")[-1]
+        tfs_request["ietf-l2vpn-svc:vpn-service"][0]["vpn-id"] = uuid_only
+
+        # Configure service endpoints
+        sites = tfs_request["ietf-l2vpn-svc:vpn-service"][0]["site"]
+        sdps = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"]
+
+        for i, site in enumerate(sites):
+            is_origin = (i == 0)
+            router_id = origin_router_id if is_origin else destination_router_id
+            sdp = sdps[0] if is_origin else sdps[1]
+            site["site-id"] = router_id
+            site["site-location"] = sdp["node-id"]
+            site["site-network-access"]["interface"]["ip-address"] = sdp["sdp-ip-address"]
+
+    # NOTE(review): if UPLOAD_TYPE is neither "WEBUI" nor "NBI", `tfs_request`
+    # is never bound and the return below raises NameError — confirm config.
+    logging.info(f"L2VPN Intent realized\n")
+    return tfs_request
+
+def tfs_l2vpn_support(requests):
+    """
+    Configuration support for L2VPN with path selection based on MPLS traffic-engineering tunnels
+
+    Args:
+        requests (list): A list of configuration parameters.
+
+    Notes:
+        - Source/destination device addresses are hardcoded below.
+        - NOTE(review): `cisco_connector` is not imported in this module, so
+          calling this function raises NameError as written — confirm the
+          intended import before use.
+    """
+    sources={
+        "source": "10.60.125.44",
+        "config":[]
+    }
+    destinations={
+        "destination": "10.60.125.45",
+        "config":[]
+    }
+    for request in requests:
+        # Configure Source Endpoint
+        temp_source = request["service_config"]["config_rules"][1]["custom"]["resource_value"]
+        endpoints = request["service_endpoint_ids"]
+        config = {
+            "ni_name": temp_source["ni_name"],
+            "remote_router": temp_source["remote_router"],
+            "interface": endpoints[0]["endpoint_uuid"]["uuid"].replace("0/0/0-", ""),
+            "vlan" : temp_source["vlan_id"],
+            "number" : temp_source["vlan_id"] % 10 + 1
+        }
+        sources["config"].append(config)
+
+        # Configure Destination Endpoint
+        temp_destiny = request["service_config"]["config_rules"][2]["custom"]["resource_value"]
+        config = {
+            "ni_name": temp_destiny["ni_name"],
+            "remote_router": temp_destiny["remote_router"],
+            "interface": endpoints[1]["endpoint_uuid"]["uuid"].replace("0/0/3-", ""),
+            "vlan" : temp_destiny["vlan_id"],
+            "number" : temp_destiny["vlan_id"] % 10 + 1
+        }
+        destinations["config"].append(config)
+
+    #cisco_source = cisco_connector(source_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_source = cisco_connector(sources["source"], sources["config"])
+    commands = cisco_source.full_create_command_template()
+    cisco_source.execute_commands(commands)
+
+    #cisco_destiny = cisco_connector(destination_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_destiny = cisco_connector(destinations["destination"], destinations["config"])
+    commands = cisco_destiny.full_create_command_template()
+    cisco_destiny.execute_commands(commands)
+
+def tfs_l2vpn_delete():
+    """
+    Delete L2VPN configurations from Cisco devices.
+
+    This method removes L2VPN configurations from Cisco routers
+
+    Notes:
+        - Uses cisco_connector to generate and execute deletion commands
+        - Clears Network Interface (NI) settings
+        - Device addresses are hardcoded below.
+        - NOTE(review): `cisco_connector` is not imported in this module, so
+          this function raises NameError as written — confirm the intended
+          import before use.
+    """
+    # Delete Source Endpoint Configuration
+    source_address = "10.60.125.44"
+    cisco_source = cisco_connector(source_address)
+    cisco_source.execute_commands(cisco_source.create_command_template_delete())
+
+    # Delete Destination Endpoint Configuration
+    destination_address = "10.60.125.45"
+    cisco_destiny = cisco_connector(destination_address)
+    cisco_destiny.execute_commands(cisco_destiny.create_command_template_delete())
\ No newline at end of file
diff --git a/src/realizer/e2e/service_types/l3ipowdm_slice.py b/src/realizer/e2e/service_types/l3ipowdm_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fadf4487056d20f65941c0e0e45caf69b249a82
--- /dev/null
+++ b/src/realizer/e2e/service_types/l3ipowdm_slice.py
@@ -0,0 +1,192 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH
+from src.utils.load_template import load_template
+
+def l3ipowdm_slice(rules):
+    """
+    Build TeraFlow service requests for an L3-over-WDM (optical) slice.
+
+    Iterates over the provisioning actions in `rules` and translates each
+    supported action type into a request built from a JSON template:
+    - CREATE_OPTICAL_SLICE: optical slice context for the optical controller.
+    - PROVISION_MEDIA_CHANNEL_OLS_PATH: TAPI media-channel service request.
+    - ACTIVATE_TRANSCEIVER: buffered; its parameters feed later CONFIG_VPNL3.
+    - CONFIG_VPNL3: IPoWDM L3 VPN service request for the orchestrator.
+
+    Args:
+        rules (dict): Provisioning rules with an "actions" list; each action
+            carries a "type" and a "content" payload.
+
+    Returns:
+        list: TeraFlow service requests, one per realized action.
+    """
+    # Parameters harvested from ACTIVATE_TRANSCEIVER actions, consumed later
+    # by the CONFIG_VPNL3 branch.
+    transceiver_params = []
+    # Updated by a PROVISION_MEDIA_CHANNEL_OLS_PATH action; stays 0 otherwise.
+    bandwidth = 0
+
+    logging.debug(f"Preparing L3oWDM slice with rules: {rules}")
+    tfs_requests = []
+    for rule in rules["actions"]:
+        logging.debug(f"Processing rule: {rule['type']}")
+        if rule["type"] == "CREATE_OPTICAL_SLICE":
+            tfs_request = load_template(os.path.join(TEMPLATES_PATH, "Optical_slice.json"))
+            request = optical_slice_template(tfs_request, rules)
+            logging.debug(f"Sending Optical Slice to Optical Controller {request}")
+            tfs_requests.append(request)
+
+        elif rule["type"] == "PROVISION_MEDIA_CHANNEL_OLS_PATH":
+
+            # Unpack the media-channel path parameters from the rule payload.
+            origin_router_id         = rule["content"]["src-sip-uuid"]
+            destination_router_id    = rule["content"]["dest-sip-uuid"]
+            direction                = rule["content"]["direction"]
+            bandwidth                = rule["content"]["bandwidth-ghz"]
+            service_uuid             = rule["content"]["ols-path-uuid"]
+            tenant_uuid              = rule["tenant-uuid"]
+            layer_protocol_name      = rule["content"]["layer-protocol-name"]
+            layer_protocol_qualifier = rule["content"]["layer-protocol-qualifier"]
+            lower_frequency_mhz      = rule["content"]["lower-frequency-mhz"]
+            upper_frequency_mhz      = rule["content"]["upper-frequency-mhz"]
+            link_uuid_path           = rule["content"]["link-uuid-path"]
+            granularity              = rule["content"]["adjustment-granularity"]
+            grid                     = rule["content"]["grid-type"]
+
+            tfs_request = load_template(os.path.join(TEMPLATES_PATH, "TAPI_service.json"))
+
+            tfs_request["services"][0]["service_id"]["service_uuid"]["uuid"] = service_uuid
+            config_rules = tfs_request["services"][0]["service_config"]["config_rules"][0]
+
+            # Fill the TAPI LSP rule set of the template in place.
+            config_rules["tapi_lsp"]["rule_set"]["src"]  = origin_router_id
+            config_rules["tapi_lsp"]["rule_set"]["dst"]  = destination_router_id
+            config_rules["tapi_lsp"]["rule_set"]["uuid"] = service_uuid
+            config_rules["tapi_lsp"]["rule_set"]["bw"]   = str(bandwidth)
+            config_rules["tapi_lsp"]["rule_set"]["tenant_uuid"] = tenant_uuid
+            config_rules["tapi_lsp"]["rule_set"]["direction"]   = direction
+            config_rules["tapi_lsp"]["rule_set"]["layer_protocol_name"] = layer_protocol_name
+            config_rules["tapi_lsp"]["rule_set"]["layer_protocol_qualifier"] = layer_protocol_qualifier
+            config_rules["tapi_lsp"]["rule_set"]["lower_frequency_mhz"] = str(lower_frequency_mhz)
+            config_rules["tapi_lsp"]["rule_set"]["upper_frequency_mhz"] = str(upper_frequency_mhz)
+            config_rules["tapi_lsp"]["rule_set"]["link_uuid_path"] = link_uuid_path
+            config_rules["tapi_lsp"]["rule_set"]["granularity"]    = granularity
+            config_rules["tapi_lsp"]["rule_set"]["grid_type"]      = grid
+
+            logging.debug(f"Sending Media Channel Service to Orchestrator: {tfs_request}")
+            tfs_requests.append(tfs_request)
+
+        elif rule["type"] == "ACTIVATE_TRANSCEIVER":
+            # Buffer transceiver parameters; no request is emitted here.
+            params = {
+                "router_id": rule["content"]["node-uuid"],
+                "router_tp": rule["content"]["termination-point-uuid"],
+                "frequency": rule["content"]["frequency-ghz"],
+                "power":     rule["content"]["tx-power-dbm"]
+            }
+            transceiver_params.append(params)
+        elif rule["type"] == "CONFIG_VPNL3":
+            src_router_id  = rule["content"]["src-node-uuid"]
+
+            # NOTE(review): assumes at least two ACTIVATE_TRANSCEIVER actions
+            # were processed before this one — IndexError otherwise. Confirm
+            # the action ordering guarantees this.
+            if src_router_id == transceiver_params[0]["router_id"]:
+                src_power = transceiver_params[0]["power"]
+                src_frequency = transceiver_params[0]["frequency"]
+                dst_power = transceiver_params[1]["power"]
+                dst_frequency = transceiver_params[1]["frequency"]
+            else:
+                src_power = transceiver_params[1]["power"]
+                src_frequency = transceiver_params[1]["frequency"]
+                dst_power = transceiver_params[0]["power"]
+                dst_frequency = transceiver_params[0]["frequency"]
+
+            # NOTE(review): duplicate of the assignment above; harmless.
+            src_router_id  = rule["content"]["src-node-uuid"]
+            src_ip_address = rule["content"]["src-ip-address"]
+            src_ip_mask    = rule["content"]["src-ip-mask"]
+            src_vlan_id    = rule["content"]["src-vlan-id"]
+
+            dst_router_id  = rule["content"]["dest-node-uuid"]
+            dst_ip_address = rule["content"]["dest-ip-address"]
+            dst_ip_mask    = rule["content"]["dest-ip-mask"]
+            dst_vlan_id    = rule["content"]["dest-vlan-id"]
+
+            service_uuid = rule["content"]["tunnel-uuid"]
+
+            tfs_request = load_template(os.path.join(TEMPLATES_PATH, "IPoWDM_orchestrator.json"))
+            tfs_request["services"][0]["service_id"]["service_uuid"]["uuid"] = service_uuid
+            config_rules = tfs_request["services"][0]["service_config"]["config_rules"][0]
+            src = config_rules["ipowdm"]["rule_set"]["src"]
+            src.append({
+                'uuid': src_router_id,
+                'ip_address': src_ip_address,
+                'ip_mask': src_ip_mask,
+                'vlan_id': src_vlan_id,
+                'power': src_power,
+                'frequency': src_frequency
+            })
+
+            dst = config_rules["ipowdm"]["rule_set"]["dst"]
+            dst.append({
+                'uuid': dst_router_id,
+                'ip_address': dst_ip_address,
+                'ip_mask': dst_ip_mask,
+                'vlan_id': dst_vlan_id,
+                'power': dst_power,
+                'frequency': dst_frequency
+            })
+
+            config_rules["ipowdm"]["rule_set"]["bw"]        = bandwidth
+            config_rules["ipowdm"]["rule_set"]["uuid"]      = service_uuid
+
+            logging.debug(f"Sending IPoWDM Service to Orchestrator: {tfs_request}")
+            tfs_requests.append(tfs_request)
+
+        else:
+            logging.debug("Unsupported rule type for optical slice: %s", rule["type"])
+    return tfs_requests
+
+def optical_slice_template(template, rule):
+    """
+    Complete the optical slice template with the data provided.
+    Args:
+        template (dict): optical slice template.
+        data (dict): Data to complete the template.
+    Returns:
+        dict: Template completed.
+    """
+
+    for action in rule.get('actions', []):
+        content = action.get('content', {})
+        nodes = content.get('node', [])
+        for node in nodes:
+            for onp in node.get('owned-node-edge-point', []):
+                if 'media-channel-node-edge-point-spec' in onp:
+                    onp['tapi-photonic-media:media-channel-node-edge-point-spec'] = onp.pop('media-channel-node-edge-point-spec')
+
+    for i, sip in enumerate(template['tapi-common:context']['service-interface-point']):
+        if i < len(rule['actions'][0]['content']['service-interface-point']):
+            sip['uuid'] = rule['actions'][0]['content']['service-interface-point'][i]['uuid']
+
+    nodes_template = template['tapi-common:context']['tapi-topology:topology-context']['topology'][0]['node']
+    nodes_data = rule['actions'][0]['content']['node']
+    for new_node in nodes_data:
+        nodes_template.append(new_node)
+
+    links_template = template['tapi-common:context']['tapi-topology:topology-context']['topology'][0]['link']
+    links_rule = rule['actions'][0]['content']['link']
+    for link_t in links_rule:
+        links_template.append(link_t)
+
+    template['tapi-common:context']['uuid'] = rule['actions'][0]['content']['tenant-uuid']
+    template['tapi-common:context']['name'][0]['value'] = rule['network-slice-uuid']
+
+    return template
diff --git a/src/realizers/ixia/NEII_V4.py b/src/realizer/ixia/helpers/NEII_V4.py
similarity index 99%
rename from src/realizers/ixia/NEII_V4.py
rename to src/realizer/ixia/helpers/NEII_V4.py
index f9379d2cc0ddb0aceecb38ad918e0a995b0cebfe..e9bf61a24d0a6b42f6d0179a4d9a92640ab679ec 100644
--- a/src/realizers/ixia/NEII_V4.py
+++ b/src/realizer/ixia/helpers/NEII_V4.py
@@ -16,13 +16,12 @@
 
 from .automatizacion_ne2v4 import automatizacion
 import ipaddress, logging
-from src.Constants import IXIA_IP
 
 class NEII_controller:
-    def __init__(self, ixia_ip=IXIA_IP):
+    def __init__(self, ixia_ip):
         self.ixia_ip = ixia_ip
 
-    def menu_principal(self, ip=IXIA_IP):
+    def menu_principal(self, ip):
         '''
         Inputs:
         Outputs:
diff --git a/src/realizers/ixia/automatizacion_ne2v4.py b/src/realizer/ixia/helpers/automatizacion_ne2v4.py
similarity index 100%
rename from src/realizers/ixia/automatizacion_ne2v4.py
rename to src/realizer/ixia/helpers/automatizacion_ne2v4.py
diff --git a/src/realizer/ixia/ixia_connect.py b/src/realizer/ixia/ixia_connect.py
new file mode 100644
index 0000000000000000000000000000000000000000..456d23a775cfda023404748623acd7f7b9a96e44
--- /dev/null
+++ b/src/realizer/ixia/ixia_connect.py
@@ -0,0 +1,35 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from .helpers.NEII_V4 import NEII_controller
+
+def ixia_connect(requests, ixia_ip):
+    """
+    Connect to the IXIA NEII controller and send the requests.
+    
+    Args:
+        requests (dict): IXIA NEII requests
+        ixia_ip (str): IXIA NEII controller IP address
+    
+    Returns:
+        response (requests.Response): Response from the IXIA NEII controller
+    """
+    response = None
+    neii_controller = NEII_controller(ixia_ip)
+    for intent in requests["services"]:
+        # Send each separate IXIA request
+        response = neii_controller.nscNEII(intent)
+    return response
\ No newline at end of file
diff --git a/src/realizer/ixia/main.py b/src/realizer/ixia/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b8d3d7c4bba812cf9439b0c0d88f6e571558efb
--- /dev/null
+++ b/src/realizer/ixia/main.py
@@ -0,0 +1,93 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+
+def ixia(ietf_intent):
+    """
+    Prepare an Ixia service request based on the IETF intent.
+
+    This method configures an Ixia service request by:
+    1. Defining endpoint routers
+    2. Loading a service template
+    3. Generating a unique service UUID
+    4. Configuring service endpoints
+    5. Adding QoS constraints
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+
+    Returns:
+        dict: An Ixia service request for configuration.
+    """
+    metric_bounds = ietf_intent.get("ietf-network-slice-service:network-slice-services", {}) \
+        .get("slo-sle-templates", {}) \
+        .get("slo-sle-template", [{}])[0] \
+        .get("slo-policy", {}) \
+        .get("metric-bound", [])
+
+    # Inicializar valores
+    bandwidth = None
+    latency = None
+    tolerance = None
+
+    # Asignar valores según el tipo de métrica
+    for metric in metric_bounds:
+        metric_type = metric.get("metric-type")
+        bound = metric.get("bound")
+
+        if metric_type == "one-way-bandwidth":
+            bandwidth = bound
+        elif metric_type == "one-way-delay-maximum":
+            latency = bound
+        elif metric_type == "one-way-delay-variation-maximum": 
+            tolerance = bound
+
+    # Construcción del diccionario intent
+    intent = {
+        "src_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slice-service", [{}])[0]
+            .get("sdps", {}).get("sdp", [{}])[0]
+            .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
+            .get("sdp-peering", {}).get("peer-sap-id"),
+
+        "dst_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slice-service", [{}])[0]
+            .get("sdps", {}).get("sdp", [{}, {}])[1]
+            .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
+            .get("sdp-peering", {}).get("peer-sap-id"),
+
+        "vlan_id": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slice-service", [{}])[0]
+            .get("sdps", {}).get("sdp", [{}])[0]
+            .get("service-match-criteria", {}).get("match-criterion", [{}])[0]
+            .get("value"),
+
+        "bandwidth": bandwidth,
+        "latency": latency,
+        "tolerance": tolerance,
+
+        "latency_version": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
+            .get("description"),
+
+        "reliability": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
+            .get("sle-policy", {}).get("reliability"),
+    }
+
+    logging.info(f"IXIA Intent realized\n")
+    return intent
\ No newline at end of file
diff --git a/src/realizer/main.py b/src/realizer/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb6908af7e82484e09c518b837d53dba11847244
--- /dev/null
+++ b/src/realizer/main.py
@@ -0,0 +1,83 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging
+from .select_way import select_way
+from .nrp_handler import nrp_handler
+from src.utils.safe_get import safe_get
+
+def realizer(ietf_intent, need_nrp=False, order=None, nrp=None, controller_type=None, response=None, rules = None):
+    """
+    Manage the slice creation workflow.
+
+    This method handles two primary scenarios:
+    1. Interact with network controllers for NRP (Network Resource Partition) operations when need_nrp is True
+    2. Slice service selection when need_nrp is False
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        need_nrp (bool, optional): Flag to indicate if NRP operations are needed. Defaults to False.
+        order (str, optional): Type of NRP operation (READ, UPDATE, CREATE). Defaults to None.
+        nrp (dict, optional): Specific Network Resource Partition to operate on. Defaults to None.
+        controller_type (str, optional): Type of controller (TFS, IXIA, E2E). Defaults to None.
+        response (dict, optional): Response built for user feedback. Defaults to None.
+        rules (dict, optional): Specific rules for slice realization. Defaults to None.
+    
+    Returns:
+        dict: A realization request for the specified network slice type.
+    """
+    if need_nrp:
+        # Perform NRP-related operations
+        nrp_view = nrp_handler(order, nrp)
+        return nrp_view
+    else:
+        # Select slice service method
+        if controller_type == "E2E":
+            if isinstance(rules, list) and len(rules) > 0: rules = rules[0]
+            actions = rules.get("actions", []) if (rules and not type(rules)== str) else []
+
+            has_transceiver  = any(a.get("type", "").startswith("XR_AGENT_ACTIVATE_TRANSCEIVER") for a in actions)
+            has_optical      = any(a.get("type", "").startswith("PROVISION_MEDIA_CHANNEL") for a in actions)
+            has_l3           = any(a.get("type", "").startswith("CONFIG_VPNL3") for a in actions)
+            has_l2           = any(a.get("type", "").startswith("CONFIG_VPNL2") for a in actions)
+
+            del_transceiver  = any(a.get("type", "").startswith("DEACTIVATE_XR_AGENT_TRANSCEIVER") for a in actions)
+            del_optical      = any(a.get("type", "").startswith("DEPROVISION_OPTICAL_RESOURCE") for a in actions)
+            del_l3           = any(a.get("type", "").startswith("REMOVE_VPNL3") for a in actions)
+            del_l2           = any(a.get("type", "").startswith("REMOVE_VPNL2") for a in actions)
+
+            if   has_transceiver:         selected_way = "L3oWDM"
+            elif has_optical and has_l3:  selected_way = "L3oWDM"
+            elif has_optical and has_l2:  selected_way = "L2oWDM"
+            elif has_optical:             selected_way = "OPTIC"
+            elif has_l3:                  selected_way = "L3VPN"
+            elif has_l2:                  selected_way = "L2VPN"
+
+            elif del_transceiver:         selected_way = "DEL_L3oWDM"
+            elif del_optical and del_l3:  selected_way = "DEL_L3oWDM"
+            elif del_optical and del_l2:  selected_way = "DEL_L2oWDM"
+            elif del_optical:             selected_way = "DEL_OPTIC"
+            elif del_l3:                  selected_way = "DEL_L3VPN"
+            elif del_l2:                  selected_way = "DEL_L2VPN"
+            else:
+                logging.warning("Cannot determine the realization way from rules. Skipping request.")
+                return None
+            way = selected_way
+        else:
+            way = safe_get(ietf_intent, ['ietf-network-slice-service:network-slice-services', 'slice-service', 0, 'service-tags', 'tag-type', 0, 'tag-type-value', 0])
+        logging.info(f"Selected way: {way}")
+        request = select_way(controller=controller_type, way=way, ietf_intent=ietf_intent, response=response, rules = rules)
+        return request
diff --git a/src/realizer/nrp_handler.py b/src/realizer/nrp_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa08d8deddddb33d8fee93042e1539450c84ce48
--- /dev/null
+++ b/src/realizer/nrp_handler.py
@@ -0,0 +1,72 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, os, json
+from src.config.constants import DATABASE_PATH
+
+def nrp_handler(request, nrp):
+    """
+    Manage Network Resource Partition (NRP) operations.
+
+    This method handles CRUD operations for Network Resource Partitions,
+    interacting with Network Controllers (currently done statically via a JSON-based database file).
+
+    Args:
+        request (str): The type of operation to perform. 
+            Supported values:
+            - "CREATE": Add a new NRP to the database
+            - "READ": Retrieve the current NRP view
+            - "UPDATE": Update an existing NRP (currently a placeholder)
+
+        nrp (dict): The Network Resource Partition details to create or update.
+
+    Returns:
+        None or answer: 
+        - For "CREATE": Returns the response from the controller (currently using a static JSON)
+        - For "READ": Gets the NRP view from the controller (currently using a static JSON)
+        - For "UPDATE": Placeholder for update functionality
+
+    Notes:
+        - Uses a local JSON file "nrp_ddbb.json" to store NRP information as controller operation is not yet defined
+    """
+    if request == "CREATE":
+        # TODO: Implement actual request to Controller to create an NRP
+        logging.debug("Creating NRP")
+
+        # Load existing NRP database
+        with open(os.path.join(DATABASE_PATH, "nrp_ddbb.json"), "r") as archivo:
+            nrp_view = json.load(archivo)
+
+        # Append new NRP to the view
+        nrp_view.append(nrp)
+
+        # Placeholder for controller POST request
+        answer = None
+        return answer
+    elif request == "READ":
+        # TODO: Request to Controller to get topology and current NRP view
+        logging.debug("Reading Topology")
+
+        # Load NRP database
+        with open(os.path.join(DATABASE_PATH, "nrp_ddbb.json"), "r") as archivo:
+            # self.__nrp_view = json.load(archivo)
+            nrp_view = json.load(archivo)
+            return nrp_view
+        
+    elif request == "UPDATE":
+        # TODO: Implement request to Controller to update NRP
+        logging.debug("Updating NRP")
+        answer = ""
\ No newline at end of file
diff --git a/src/realizer/select_way.py b/src/realizer/select_way.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d3cc531b03ef75def47eaadd864ef51c503d900
--- /dev/null
+++ b/src/realizer/select_way.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .ixia.main import ixia
+from .tfs.main  import tfs
+from .e2e.main  import e2e
+
+def select_way(controller=None, way=None, ietf_intent=None, response=None, rules = None):
+    """
+    Determine the method of slice realization.
+
+    Args:
+        controller (str): The controller to use for slice realization. Defaults to None.
+            Supported values:
+            - "IXIA": IXIA NEII for network testing
+            - "TFS": TeraFlow Service for network slice management
+            - "E2E": End-to-End controller for e2e slice management
+        way (str): The type of technology to use. Defaults to None.
+        ietf_intent (dict): IETF-formatted network slice intent. Defaults to None.
+        response (dict): Response built for user feedback. Defaults to None.
+        rules (list, optional): Specific rules for slice realization. Defaults to None.
+
+    Returns:
+        dict: A realization request for the specified network slice type.
+
+    """
+    realizing_request = None
+    if controller == "TFS":
+        realizing_request = tfs(ietf_intent, way, response)
+    elif controller == "IXIA":
+        realizing_request = ixia(ietf_intent)
+    elif controller == "E2E":
+        realizing_request = e2e(ietf_intent, way, response, rules)
+    else:
+        logging.warning(f"Unsupported controller: {controller}. Defaulting to TFS realization.")
+        realizing_request = tfs(ietf_intent, way, response)
+    return realizing_request
\ No newline at end of file
diff --git a/src/realizer/send_controller.py b/src/realizer/send_controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..334de49bdb472d898c968aa0125190a396cf733d
--- /dev/null
+++ b/src/realizer/send_controller.py
@@ -0,0 +1,65 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from flask import current_app
+from .tfs.tfs_connect import tfs_connect
+from .ixia.ixia_connect import ixia_connect
+from .e2e.e2e_connect import e2e_connect
+
+def send_controller(controller_type, requests):
+    """
+    Route provisioning requests to the appropriate network controller.
+    
+    This function acts as a dispatcher that sends configuration requests to
+    different SDN controller types based on the specified controller type.
+    
+    Args:
+        controller_type (str): Type of controller to send requests to:
+            - "TFS": TeraFlow SDN controller
+            - "IXIA": Ixia network emulation controller
+            - "E2E": TeraFlow End-to-End controller
+        requests (dict or list): Configuration requests to be sent to the controller
+    
+    Returns:
+        bool or dict: Response from the controller indicating success/failure
+                     of the provisioning operation. Returns True in DUMMY_MODE.
+                     
+    Notes:
+        - If DUMMY_MODE is enabled in config, returns True without sending requests
+        - Uses IP addresses from Flask application configuration:
+          * TFS_IP for TeraFlow
+          * IXIA_IP for Ixia
+          * TFS_E2E for End-to-End
+        - Logs the controller type that received the request
+        
+    Raises:
+        Exception: May be raised by individual connect functions on communication errors
+    """
+    if current_app.config["DUMMY_MODE"]:
+        return True
+        
+    if controller_type == "TFS":
+        response = tfs_connect(requests, current_app.config["TFS_IP"])
+        logging.info("Request sent to Teraflow")
+    elif controller_type == "IXIA":
+        response = ixia_connect(requests, current_app.config["IXIA_IP"])
+        logging.info("Requests sent to Ixia")
+    elif controller_type == "E2E":
+        response = e2e_connect(requests, current_app.config["TFS_E2E"])
+        logging.info("Requests sent to Teraflow E2E")
+        
+    return response
\ No newline at end of file
diff --git a/src/helpers.py b/src/realizer/tfs/helpers/cisco_connector.py
similarity index 56%
rename from src/helpers.py
rename to src/realizer/tfs/helpers/cisco_connector.py
index 0e150791ac742c02c03aaa755c04a980481b4336..48120069234d6e04f722dfa48443a0ace2051b3c 100644
--- a/src/helpers.py
+++ b/src/realizer/tfs/helpers/cisco_connector.py
@@ -12,87 +12,55 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This file includes original contributions from Telefonica Innovación Digital S.L.
+# This file is an original contribution from Telefonica Innovación Digital S.L.
 
-import logging, requests, json
+import logging
 from netmiko import ConnectHandler
-from src.Constants import DEFAULT_LOGGING_LEVEL
 
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
-    level=DEFAULT_LOGGING_LEVEL,
-    format='%(levelname)s - %(message)s')
-
-#Teraflow
-class tfs_connector():
-    
-    def webui_post(self, tfs_ip, service):
-        user="admin"
-        password="admin"
-        token=""
-        session = requests.Session()
-        session.auth = (user, password)
-        url=f'http://{tfs_ip}/webui'
-        response=session.get(url=url)
-        for item in response.iter_lines():
-            if("csrf_token" in str(item)):
-                string=str(item).split(' requests.Response:
+        """
+        Delete service from TFS NBI.
+        Args:
+            tfs_ip (str): IP address of the TFS instance
+            service_type (str): Type of the service ('L2' or 'L3')
+            service_id (str): Unique identifier of the service to delete
+        Returns:
+            requests.Response: Response object from the DELETE request
+        """
+        user="admin"
+        password="admin"
+        url = f'http://{user}:{password}@{tfs_ip}'
+        if service_type == 'L2':
+            url = url + f'/{NBI_L2_PATH}/vpn-service={service_id}'
+        elif service_type == 'L3':
+            url = url + f'/{NBI_L3_PATH}/vpn-service={service_id}'
+        else:
+            raise ValueError("Invalid service type. Use 'L2' or 'L3'.")
+        response = requests.delete(url, timeout=60)
+        response.raise_for_status()
+        logging.debug('Service deleted successfully')
+        logging.debug("Http response: %s",response.text)
+        return response
\ No newline at end of file
diff --git a/src/realizer/tfs/main.py b/src/realizer/tfs/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..74be42ab9990c89a929d7b4157e8c2ca9f4e4a9e
--- /dev/null
+++ b/src/realizer/tfs/main.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .service_types.tfs_l2vpn import tfs_l2vpn
+from .service_types.tfs_l3vpn import tfs_l3vpn
+
+def tfs(ietf_intent, way=None, response=None):
+    """
+    Generates a TFS realizing request based on the specified way (L2 or L3).
+
+    Args:
+        ietf_intent (dict): The IETF intent to be realized. Defaults to None.
+        way (str): The type of service to realize ("L2" or "L3"). Defaults to None.
+        response (dict): Response built for user feedback. Defaults to None.
+        
+    Returns:
+        dict: A realization request for the specified network slice type.
+    """
+    if way == "L2":
+        realizing_request = tfs_l2vpn(ietf_intent, response)
+    elif way == "L3":
+        realizing_request = tfs_l3vpn(ietf_intent, response)
+    else:
+        logging.warning(f"Unsupported way: {way}. Defaulting to L2 realization.")
+        realizing_request = tfs_l2vpn(ietf_intent, response)
+    return realizing_request
\ No newline at end of file
diff --git a/src/realizer/tfs/service_types/tfs_l2vpn.py b/src/realizer/tfs/service_types/tfs_l2vpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cf5af9af911f8483278dde743ff752ee8dfd743
--- /dev/null
+++ b/src/realizer/tfs/service_types/tfs_l2vpn.py
@@ -0,0 +1,186 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH, NBI_L2_PATH
+from src.utils.load_template import load_template
+from src.utils.safe_get import safe_get
+from ..helpers.cisco_connector import cisco_connector
+from flask import current_app
+
+def tfs_l2vpn(ietf_intent, response):
+    """
+    Translate slice intent into a TeraFlow service request.
+
+    This method prepares a L2VPN service request by:
+    1. Defining endpoint routers
+    2. Loading a service template
+    3. Generating a unique service UUID
+    4. Configuring service endpoints
+    5. Adding QoS constraints
+    6. Preparing configuration rules for network interfaces
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        response (dict): Response data containing slice information.
+
+    Returns:
+        dict: A TeraFlow service request for L2VPN configuration.
+
+    """
+    # Hardcoded router endpoints
+    # TODO (should be dynamically determined)
+    origin_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 0, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not origin_router_id:
+        logging.warning("Origin router ID not found in the intent. Skipping L2VPN realization.")
+        return None
+    origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    destination_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 1, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not destination_router_id:
+        logging.warning("Destination router ID not found in the intent. Skipping L2VPN realization.")
+        return None
+    destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+    slice = next((d for d in response if d.get("id") == id), None)
+
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        # Load L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "L2-VPN_template_empty.json"))["services"][0]
+
+        # Configure service UUID
+        tfs_request["service_id"]["service_uuid"]["uuid"] = ietf_intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]["id"]
+
+        # Configure service endpoints
+        for endpoint in tfs_request["service_endpoint_ids"]:
+            endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
+            endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
+
+        # Add service constraints
+        for constraint in slice.get("requirements", []):
+            tfs_request["service_constraints"].append({"custom": constraint})
+
+        # Add configuration rules
+        for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
+            router_id = origin_router_id if i == 1 else destination_router_id
+            router_if = origin_router_if if i == 1 else destination_router_if
+            resource_value = config_rule["custom"]["resource_value"]
+
+            sdp_index = i - 1
+            vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
+            if vlan_value:
+                resource_value["vlan_id"] = int(vlan_value)
+            resource_value["circuit_id"] = vlan_value
+            resource_value["remote_router"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
+            config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
+
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        #self.path = NBI_L2_PATH
+        # Load IETF L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "ietfL2VPN_template_empty.json"))
+        
+        # Add path to the request
+        tfs_request["path"] = NBI_L2_PATH
+
+        # Generate service UUID
+        full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        uuid_only = full_id.split("slice-service-")[-1]
+        tfs_request["ietf-l2vpn-svc:vpn-service"][0]["vpn-id"] = uuid_only
+
+        # Configure service endpoints
+        sites = tfs_request["ietf-l2vpn-svc:vpn-service"][0]["site"]
+        sdps = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"]
+
+        for i, site in enumerate(sites):
+            is_origin = (i == 0)
+            router_id = origin_router_id if is_origin else destination_router_id
+            sdp = sdps[0] if is_origin else sdps[1]
+            site["site-id"] = router_id
+            site["site-location"] = sdp["node-id"]
+            site["site-network-access"]["interface"]["ip-address"] = sdp["sdp-ip-address"]
+
+    logging.info(f"L2VPN Intent realized")
+    return tfs_request
+
+def tfs_l2vpn_support(requests):
+    """
+    Configuration support for L2VPN with path selection based on MPLS traffic-engineering tunnels.
+
+    Collects per-endpoint network-instance settings from each TeraFlow service
+    request, then pushes the generated commands to the two Cisco routers via
+    cisco_connector.
+
+    Args:
+        requests (list): TeraFlow service request dicts, each carrying
+            "service_config" config rules and "service_endpoint_ids" as built
+            by tfs_l2vpn().
+
+    Notes:
+        - Router management addresses are hardcoded (10.60.125.44 / 10.60.125.45).
+        - "number" is derived from the VLAN id (vlan % 10 + 1); presumably a
+          tunnel/policy index — TODO confirm.
+    """
+    # Accumulators: one command batch per physical router
+    sources={
+        "source": "10.60.125.44",
+        "config":[]
+    }
+    destinations={
+        "destination": "10.60.125.45",
+        "config":[]
+    }
+    for request in requests:
+        # Configure Source Endpoint
+        # config_rules[1] holds the origin endpoint's resource_value (see tfs_l2vpn)
+        temp_source = request["service_config"]["config_rules"][1]["custom"]["resource_value"]
+        endpoints = request["service_endpoint_ids"]
+        config = {
+            "ni_name": temp_source["ni_name"],
+            "remote_router": temp_source["remote_router"],
+            "interface": endpoints[0]["endpoint_uuid"]["uuid"].replace("0/0/0-", ""),
+            "vlan" : temp_source["vlan_id"],
+            "number" : temp_source["vlan_id"] % 10 + 1
+        }
+        sources["config"].append(config)
+
+        # Configure Destination Endpoint
+        # config_rules[2] holds the destination endpoint's resource_value
+        temp_destiny = request["service_config"]["config_rules"][2]["custom"]["resource_value"]
+        config = {
+            "ni_name": temp_destiny["ni_name"],
+            "remote_router": temp_destiny["remote_router"],
+            # NOTE(review): strips "0/0/3-" here but "0/0/0-" for the source —
+            # possibly an intentional port mapping; confirm against device config
+            "interface": endpoints[1]["endpoint_uuid"]["uuid"].replace("0/0/3-", ""),
+            "vlan" : temp_destiny["vlan_id"],
+            "number" : temp_destiny["vlan_id"] % 10 + 1
+        }
+        destinations["config"].append(config)
+        
+    # Push the accumulated configuration to each router over SSH (netmiko)
+    #cisco_source = cisco_connector(source_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_source = cisco_connector(sources["source"], sources["config"])
+    commands = cisco_source.full_create_command_template()
+    cisco_source.execute_commands(commands)
+
+    #cisco_destiny = cisco_connector(destination_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_destiny = cisco_connector(destinations["destination"], destinations["config"])
+    commands = cisco_destiny.full_create_command_template()
+    cisco_destiny.execute_commands(commands)
+
+def tfs_l2vpn_delete():
+    """
+    Delete L2VPN configurations from Cisco devices.
+
+    This method removes L2VPN configurations from Cisco routers
+
+    Notes:
+        - Uses cisco_connector to generate and execute deletion commands
+        - Clears Network Interface (NI) settings
+    """
+    # Delete Source Endpoint Configuration
+    source_address = "10.60.125.44"
+    cisco_source = cisco_connector(source_address)
+    cisco_source.execute_commands(cisco_source.create_command_template_delete())
+
+    # Delete Destination Endpoint Configuration
+    destination_address = "10.60.125.45"
+    cisco_destiny = cisco_connector(destination_address)
+    cisco_destiny.execute_commands(cisco_destiny.create_command_template_delete())
\ No newline at end of file
diff --git a/src/realizer/tfs/service_types/tfs_l3vpn.py b/src/realizer/tfs/service_types/tfs_l3vpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..93befbca333b7a63ad477ae5b951e5ced486e776
--- /dev/null
+++ b/src/realizer/tfs/service_types/tfs_l3vpn.py
@@ -0,0 +1,141 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH, NBI_L3_PATH
+from src.utils.load_template import load_template
+from src.utils.safe_get import safe_get
+from flask import current_app
+
+def tfs_l3vpn(ietf_intent, response):
+    """
+    Translate L3VPN (Layer 3 Virtual Private Network) intent into a TeraFlow service request.
+
+    Similar to __tfs_l2vpn, but configured for Layer 3 VPN:
+    1. Defines endpoint routers
+    2. Loads service template
+    3. Generates unique service UUID
+    4. Configures service endpoints
+    5. Adds QoS constraints
+    6. Prepares configuration rules for network interfaces
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        response (dict): Response data containing slice information.
+
+    Returns:
+        dict: A TeraFlow service request for L3VPN configuration.
+    """
+    # Hardcoded router endpoints
+    # TODO (should be dynamically determined)
+    origin_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 0, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not origin_router_id:
+        logging.warning("Origin router ID not found in the intent. Skipping L3VPN realization.")
+        return None
+    origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    destination_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 1, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not destination_router_id:
+        logging.warning("Destination router ID not found in the intent. Skipping L3VPN realization.")
+        return None
+    destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+    slice = next((d for d in response if d.get("id") == id), None)
+
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        # Load L3VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "L3-VPN_template_empty.json"))["services"][0]
+        # Configure service UUID
+        tfs_request["service_id"]["service_uuid"]["uuid"] = ietf_intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]["id"]
+
+        # Configure service endpoints
+        for endpoint in tfs_request["service_endpoint_ids"]:
+            endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
+            endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
+
+        # Add service constraints
+        for constraint in slice.get("requirements", []):
+            tfs_request["service_constraints"].append({"custom": constraint})
+
+        # Add configuration rules
+        for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
+            router_id = origin_router_id if i == 1 else destination_router_id
+            router_if = origin_router_if if i == 1 else destination_router_if
+            resource_value = config_rule["custom"]["resource_value"]
+
+            sdp_index = i - 1
+            vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
+            resource_value["router_id"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["vlan_id"] = int(vlan_value)
+            resource_value["address_ip"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["policy_AZ"] = "policyA"
+            resource_value["policy_ZA"] = "policyB"
+            resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
+            config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
+    
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        #self.path = NBI_L3_PATH
+
+        # Load IETF L3VPN service template
+        tfs_request =  load_template(os.path.join(TEMPLATES_PATH, "ietfL3VPN_template_empty.json"))
+
+        # Add path to the request
+        tfs_request["path"] = NBI_L3_PATH
+
+        # Generate service UUID
+        full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["vpn-services"]["vpn-service"][0]["vpn-id"] = full_id
+        # Configure service endpoints
+        for i, site in enumerate(tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["sites"]["site"]):
+
+            # Determine if origin or destination
+            is_origin = (i == 0)
+            sdp_index = 0 if is_origin else 1
+            location = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["node-id"]
+            router_id = origin_router_id if is_origin else destination_router_id
+            router_if = origin_router_if if is_origin else destination_router_if
+
+            # Assign common values
+            site["site-id"] = f"site_{location}"
+            site["locations"]["location"][0]["location-id"] = location
+            site["devices"]["device"][0]["device-id"] = router_id
+            site["devices"]["device"][0]["location"] = location
+
+            access = site["site-network-accesses"]["site-network-access"][0]
+            access["site-network-access-id"] = router_if
+            access["device-reference"] = router_id
+            access["vpn-attachment"]["vpn-id"] = full_id
+
+            # Aplicar restricciones QoS
+            for constraint in slice.get("requirements", []):
+                ctype = constraint["constraint_type"]
+                cvalue = float(constraint["constraint_value"])
+                if constraint["constraint_type"].startswith("one-way-bandwidth"):
+                        unit = constraint["constraint_type"].split("[")[-1].rstrip("]")
+                        multiplier = {"bps": 1, "kbps": 1_000, "Mbps": 1_000_000, "Gbps": 1_000_000_000}.get(unit, 1)
+                        value = int(cvalue * multiplier)
+                        access["service"]["svc-input-bandwidth"] = value
+                        access["service"]["svc-output-bandwidth"] = value
+                elif ctype == "one-way-delay-maximum[milliseconds]":
+                    access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["latency"]["latency-boundary"] = int(cvalue)
+                elif ctype == "availability[%]":
+                    access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["bandwidth"]["guaranteed-bw-percent"] = int(cvalue)
+                elif ctype == "mtu[bytes]":
+                    access["service"]["svc-mtu"] = int(cvalue)
+
+    
+    logging.info(f"L3VPN Intent realized")
+    #self.answer[self.subnet]["VLAN"] = vlan_value
+    return tfs_request
\ No newline at end of file
diff --git a/src/realizer/tfs/tfs_connect.py b/src/realizer/tfs/tfs_connect.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c8334dff5d5cc82d3feaa037dce6f1a7da5ccfd
--- /dev/null
+++ b/src/realizer/tfs/tfs_connect.py
@@ -0,0 +1,48 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from .helpers.tfs_connector import tfs_connector
+from flask import current_app
+from src.utils.send_response import send_response
+from .service_types.tfs_l2vpn import tfs_l2vpn_support
+
+def tfs_connect(requests, tfs_ip):
+    """
+    Connect to TeraflowSDN (TFS) controller and upload services.
+    
+    Args:
+        requests (dict): Dictionary containing services to upload
+        tfs_ip (str): IP address of the TFS controller
+    
+    Returns:
+        response (requests.Response): Response from TFS controller
+    """       
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        response = tfs_connector().webui_post(tfs_ip, requests)
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        for intent in requests["services"]:
+            # Send each separate NBI request
+            path = intent.pop("path")
+            response = tfs_connector().nbi_post(tfs_ip, intent, path)
+
+            if not response.ok:
+                return send_response(False, code=response.status_code, message=f"Teraflow upload failed. Response: {response.text}")
+    
+    # For deploying an L2VPN with path selection (not supported by Teraflow)
+    if current_app.config["TFS_L2VPN_SUPPORT"]:
+        tfs_l2vpn_support(requests["services"])
+    
+    return response
\ No newline at end of file
diff --git a/src/slice_ddbb.json b/src/slice_ddbb.json
deleted file mode 100644
index 0637a088a01e8ddab3bf3fa98dbe804cbde1a0dc..0000000000000000000000000000000000000000
--- a/src/slice_ddbb.json
+++ /dev/null
@@ -1 +0,0 @@
-[]
\ No newline at end of file
diff --git a/src/templates/IPoWDM_orchestrator.json b/src/templates/IPoWDM_orchestrator.json
new file mode 100644
index 0000000000000000000000000000000000000000..60eb0dbaaebca3e0a35d5b9007121119a60443f8
--- /dev/null
+++ b/src/templates/IPoWDM_orchestrator.json
@@ -0,0 +1,31 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}},
+                "service_uuid": {"uuid": "TAPI LSP"}
+            },
+            "service_type": 12,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}},"endpoint_uuid": {"uuid": "mgmt"}},
+                {"device_id": {"device_uuid": {"uuid": "TFS-PACKET"}},"endpoint_uuid": {"uuid": "mgmt"}}
+
+            ],
+            "service_constraints": [],
+
+            "service_config": {"config_rules": [
+                {"action": 1, "ipowdm": {
+                    "endpoint_id": {
+                        "device_id": {"device_uuid": {"uuid": "TFS-PACKET"}},
+                        "endpoint_uuid": {"uuid": "mgmt"}
+                    },
+                    "rule_set": {
+                        "src"  : [],
+                        "dst"  : []
+                    }
+                }}
+            ]}
+        }
+    ]
+}
\ No newline at end of file
diff --git a/src/templates/Optical_slice.json b/src/templates/Optical_slice.json
new file mode 100644
index 0000000000000000000000000000000000000000..94c87fe03f52b7a10acfc537eb5dbabfc6b4a46b
--- /dev/null
+++ b/src/templates/Optical_slice.json
@@ -0,0 +1,28 @@
+{
+   "tapi-common:context" : {
+      "name" : [
+         {
+            "value" : ""
+         }
+      ],
+      "service-interface-point" : [
+         {
+            "uuid" : ""
+         },
+         {
+            "uuid" : ""
+         }
+      ],
+      "tapi-topology:topology-context" : {
+         "topology" : [
+            {
+               "link" : [
+               ],
+               "node" : [
+               ]
+            }
+         ]
+      },
+      "uuid" : ""
+   }
+}
diff --git a/src/templates/TAPI_service.json b/src/templates/TAPI_service.json
new file mode 100644
index 0000000000000000000000000000000000000000..1b09a04cb809d0d959340e6d23a15d65f7f372b0
--- /dev/null
+++ b/src/templates/TAPI_service.json
@@ -0,0 +1,43 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}},
+                "service_uuid": {"uuid": "TAPI LSP"}
+            },
+            "service_type": 11,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}},"endpoint_uuid": {"uuid": "mgmt"}},
+                {"device_id": {"device_uuid": {"uuid": "TFS-PACKET"}},"endpoint_uuid": {"uuid": "mgmt"}}
+
+            ],
+            "service_constraints": [],
+
+            "service_config": {"config_rules": [
+                {"action": 1, "tapi_lsp": {
+                    "endpoint_id": {
+                        "device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}},
+                        "endpoint_uuid": {"uuid": "mgmt"}
+                    },
+                    "rule_set": {
+                        "src": "",
+                        "dst": "",
+                        "uuid": "",
+                        "bw": "",
+                        "tenant_uuid": "",
+                        "direction": "",
+                        "layer_protocol_name": "",
+                        "layer_protocol_qualifier": "",
+                        "lower_frequency_mhz": "",
+                        "upper_frequency_mhz": "",
+                        "link_uuid_path": [
+                        ],
+                        "granularity": "",
+                        "grid_type": ""
+                    }
+                }}
+            ]}
+        }
+    ]
+}
\ No newline at end of file
diff --git a/src/templates/ietf_template_empty.json b/src/templates/ietf_template_empty.json
index cdaf66cdad3fbd7f01c09a7987cf8729600952b0..c484a4a2991e0b34ce691ee14125c2fddb00fa41 100644
--- a/src/templates/ietf_template_empty.json
+++ b/src/templates/ietf_template_empty.json
@@ -29,10 +29,14 @@
            "id":"5GSliceMapping",
            "description":"example 5G Slice mapping",
            "service-tags":{
-              "tag-type":{
-                 "tag-type":"",
-                 "value":""
+            "tag-type": [
+              {
+                "tag-type": "",
+                "tag-type-value": [
+                  ""
+                ]
               }
+            ]
            },
            "slo-sle-policy":{
               "slo-sle-template":""
diff --git a/src/tests/requests/3ggpp_template_green.json b/src/tests/requests/3ggpp_template_green.json
new file mode 100644
index 0000000000000000000000000000000000000000..67a1367b093b84c1dd589c803cee55cb130ce232
--- /dev/null
+++ b/src/tests/requests/3ggpp_template_green.json
@@ -0,0 +1,176 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "CNSliceSubnet1",
+      "RANSliceSubnet1"
+    ]
+  },
+  "CNSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "CN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "CNId",
+        "pLMNInfoList": null,
+        "CNSliceSubnetProfile": {
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+            "dLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "dLLatency": 8,
+          "uLLatency": 8,
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "MidhaulSliceSubnet1"
+    ]
+  },
+  "MidhaulSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "EnergyEfficiency": 5,
+          "EnergyConsumption": 18000,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 650
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-UP1",
+      "EpTransport DU3"
+    ]
+  },
+  "BackhaulSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+            "dLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "dLLatency": 8,
+          "uLLatency": 8,
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-UP2",
+      "EpTransport UPF"
+    ]
+  },
+  "EpTransport CU-UP1": {
+    "IpAddress": "1.1.1.100",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "300"
+    },
+    "NextHopInfo": "1.1.1.1",
+    "qosProfile": "5QI100",
+    "EpApplicationRef": [
+      "EP_F1U CU-UP1"
+    ]
+  },
+  "EP_F1U CU-UP1": {
+    "localAddress": "100.1.1.100",
+    "remoteAddress": "200.1.1.100",
+    "epTransportRef": [
+      "EpTransport CU-UP1"
+    ]
+  },
+  "EpTransport DU3": {
+    "IpAddress": "2.2.2.100",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "300"
+    },
+    "NextHopInfo": "2.2.2.2",
+    "qosProfile": "5QI100",
+    "EpApplicationRef": [
+      "EP_F1U DU3"
+    ]
+  },
+  "EP_F1U DU3": {
+    "localAddress": "200.1.1.100",
+    "remoteAddress": "100.1.1.100",
+    "epTransportRef": [
+      "EpTransport DU3"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed80ec01d699a6bea39f99b19af1e22550c1c851
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json
@@ -0,0 +1,267 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN2",
+      "BackhaulSliceSubnetN31",
+      "BackhaulSliceSubnetN32"
+    ]
+  },
+  "BackhaulSliceSubnetN2": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N2",
+      "EpTransport AMF-N2"
+    ]
+  },
+  "BackhaulSliceSubnetN31": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 50,
+            "MaxThpt": 100
+          },
+          "dLLatency": 10,
+          "uLLatency": 10
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N31",
+      "EpTransport UPF-N31"
+    ]
+  },
+  "BackhaulSliceSubnetN32": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 5,
+          "uLLatency": 5
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N32",
+      "EpTransport UPF-N32"
+    ]
+  },
+  "EpTransport CU-N2": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 CU-N2"
+    ]
+  },
+  "EP_N2 CU-N2": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.105",
+    "epTransportRef": [
+      "EpTransport CU-N2"
+    ]
+  },
+  "EpTransport AMF-N2": {
+    "IpAddress": "10.60.60.105",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 AMF-N2"
+    ]
+  },
+  "EP_N2 AMF-N2": {
+    "localAddress": "10.60.60.105",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport AMF-N2"
+    ]
+  },
+  "EpTransport CU-N32": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 CU-N32"
+    ]
+  },
+  "EP_N3 CU-N32": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.10.6",
+    "epTransportRef": [
+      "EpTransport CU-N32"
+    ]
+  },
+  "EpTransport UPF-N32": {
+    "IpAddress": "10.60.10.6",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N32"
+    ]
+  },
+  "EP_N3 UPF-N32": {
+    "localAddress": "10.60.10.6",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N32"
+    ]
+  },
+  "EpTransport CU-N31": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 CU-N31"
+    ]
+  },
+  "EP_N3 CU-N31": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.106",
+    "epTransportRef": [
+      "EpTransport CU-N31"
+    ]
+  },
+  "EpTransport UPF-N31": {
+    "IpAddress": "10.60.60.106",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N31"
+    ]
+  },
+  "EP_N3 UPF-N31": {
+    "localAddress": "10.60.60.106",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N31"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json
new file mode 100644
index 0000000000000000000000000000000000000000..9dab29465a9d992e795a50f1a063bbb5ca05104e
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json
@@ -0,0 +1,131 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN2"
+    ]
+  },
+  "BackhaulSliceSubnetN2": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 1,
+            "MaxThpt": 2
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 1,
+            "MaxThpt": 2
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N2",
+      "EpTransport AMF-N2"
+    ]
+  },
+  "EpTransport CU-N2": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 CU-N2"
+    ]
+  },
+  "EP_N2 CU-N2": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.105",
+    "epTransportRef": [
+      "EpTransport CU-N2"
+    ]
+  },
+  "EpTransport AMF-N2": {
+    "IpAddress": "10.60.60.105",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 AMF-N2"
+    ]
+  },
+  "EP_N2 AMF-N2": {
+    "localAddress": "10.60.60.105",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport AMF-N2"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json
new file mode 100644
index 0000000000000000000000000000000000000000..d287a04fbf1da5202daf1a71c98ff2c509535523
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json
@@ -0,0 +1,131 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN32"
+    ]
+  },
+  "BackhaulSliceSubnetN32": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 5,
+          "uLLatency": 5
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N32",
+      "EpTransport UPF-N32"
+    ]
+  },
+  "EpTransport CU-N32": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 CU-N32"
+    ]
+  },
+  "EP_N3 CU-N32": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.10.6",
+    "epTransportRef": [
+      "EpTransport CU-N32"
+    ]
+  },
+  "EpTransport UPF-N32": {
+    "IpAddress": "10.60.10.6",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N32"
+    ]
+  },
+  "EP_N3 UPF-N32": {
+    "localAddress": "10.60.10.6",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N32"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json
new file mode 100644
index 0000000000000000000000000000000000000000..55232e8eb5a17bfa803995f0751933c82e1df9ec
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json
@@ -0,0 +1,131 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN31"
+    ]
+  },
+  "BackhaulSliceSubnetN31": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 50,
+            "MaxThpt": 100
+          },
+          "dLLatency": 10,
+          "uLLatency": 10
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N31",
+      "EpTransport UPF-N31"
+    ]
+  },
+  "EpTransport CU-N31": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 CU-N31"
+    ]
+  },
+  "EP_N3 CU-N31": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.106",
+    "epTransportRef": [
+      "EpTransport CU-N31"
+    ]
+  },
+  "EpTransport UPF-N31": {
+    "IpAddress": "10.60.60.106",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N31"
+    ]
+  },
+  "EP_N3 UPF-N31": {
+    "localAddress": "10.60.60.106",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N31"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json b/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json
new file mode 100644
index 0000000000000000000000000000000000000000..300f8666fbb20d501052194393ab5232d22d3424
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json
@@ -0,0 +1,267 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 410,
+            "MaxThpt": 820
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 210,
+            "MaxThpt": 420
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 410,
+            "MaxThpt": 820
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 210,
+            "MaxThpt": 220
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "MidhaulSliceSubnetF1c",
+      "MidhaulSliceSubnetF1u1",
+      "MidhaulSliceSubnetF1u2"
+    ]
+  },
+  "MidhaulSliceSubnetF1c": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-F1c",
+      "EpTransport DU-F1c"
+    ]
+  },
+  "MidhaulSliceSubnetF1u1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 5,
+          "uLLatency": 5
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-F1u1",
+      "EpTransport DU-F1u1"
+    ]
+  },
+  "MidhaulSliceSubnetF1u2": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 10,
+          "uLLatency": 10
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-F1u2",
+      "EpTransport DU-F1u2"
+    ]
+  },
+  "EpTransport CU-F1c": {
+    "IpAddress": "10.60.10.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_F1C CU-F1c"
+    ]
+  },
+  "EP_F1C CU-F1c": {
+    "localAddress": "10.60.10.2",
+    "remoteAddress": "10.60.11.2",
+    "epTransportRef": [
+      "EpTransport CU-F1c"
+    ]
+  },
+  "EpTransport DU-F1c": {
+    "IpAddress": "10.60.11.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_F1C DU-F1c"
+    ]
+  },
+  "EP_F1C DU-F1c": {
+    "localAddress": "10.60.11.2",
+    "remoteAddress": "10.60.10.2",
+    "epTransportRef": [
+      "EpTransport DU-F1c"
+    ]
+  },
+  "EpTransport CU-F1u1": {
+    "IpAddress": "10.60.10.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_F1U CU-F1u1"
+    ]
+  },
+  "EP_F1U CU-F1u1": {
+    "localAddress": "10.60.10.2",
+    "remoteAddress": "10.60.11.2",
+    "epTransportRef": [
+      "EpTransport CU-F1u1"
+    ]
+  },
+  "EpTransport DU-F1u1": {
+    "IpAddress": "10.60.11.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_F1U DU-F1u1"
+    ]
+  },
+  "EP_F1U DU-F1u1": {
+    "localAddress": "10.60.11.2",
+    "remoteAddress": "10.60.10.2",
+    "epTransportRef": [
+      "EpTransport DU-F1u1"
+    ]
+  },
+  "EpTransport CU-F1u2": {
+    "IpAddress": "10.60.10.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_F1U CU-F1u2"
+    ]
+  },
+  "EP_F1U CU-F1u2": {
+    "localAddress": "10.60.10.2",
+    "remoteAddress": "10.60.11.2",
+    "epTransportRef": [
+      "EpTransport CU-F1u2"
+    ]
+  },
+  "EpTransport DU-F1u2": {
+    "IpAddress": "10.60.11.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_F1U DU-F1u2"
+    ]
+  },
+  "EP_F1U DU-F1u2": {
+    "localAddress": "10.60.11.2",
+    "remoteAddress": "10.60.10.2",
+    "epTransportRef": [
+      "EpTransport DU-F1u2"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/P2MP.json b/src/tests/requests/P2MP.json
new file mode 100644
index 0000000000000000000000000000000000000000..02875dcf74a61c00b953525b93814ccd1167891a
--- /dev/null
+++ b/src/tests/requests/P2MP.json
@@ -0,0 +1,108 @@
+{
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {
+                        "id": "LOW-DELAY",
+                        "description": "Prefer direct link: delay <= 2ms",
+                        "slo-policy": {
+                            "metric-bound": [
+                                {
+                                    "metric-type": "two-way-delay-maximum",
+                                    "metric-unit": "milliseconds",
+                                    "bound": 2
+                                }
+                            ]
+                        }
+                    }
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "slice-long",
+                    "description": "Slice tolerant to intermediate hops",
+                    "slo-sle-policy": {
+                        "slo-sle-template": "LOW-DELAY"
+                    },
+                    "sdps": {
+                        "sdp": [
+                            {
+                                "id": "T1.2",
+                                "node-id": "T1.2",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r1",
+                                            "ac-ipv4-address": "10.10.1.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            },
+                            {
+                                "id": "T1.1",
+                                "node-id": "T1.1",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r2",
+                                            "ac-ipv4-address": "10.10.2.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            },
+                            {
+                                "id": "T2.1",
+                                "node-id": "T2.1",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r3",
+                                            "ac-ipv4-address": "10.10.3.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            },
+                            {
+                                "id": "T1.3",
+                                "node-id": "T1.3",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r3",
+                                            "ac-ipv4-address": "10.10.4.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            }
+                        ]
+                    },
+                    "connection-groups": {
+                        "connection-group": [
+                            {
+                                "id": "cg-long",
+                                "connectivity-type": "ietf-vpn-common:any-to-any",
+                                "connectivity-construct": [
+                                    {
+                                        "id": "cc-p2mp",
+                                        "p2mp-sdp": {
+                                            "root-sdp-id": "T2.1",
+                                            "leaf-sdp-id": [
+                                                "T1.1",
+                                                "T1.2",
+                                                "T1.3"
+
+                                            ]
+                                        }
+                                    }
+                                ]
+                            }
+                        ]
+                    }
+                }
+            ]
+        }
+    }
\ No newline at end of file
diff --git a/src/tests/requests/create_slice_1.json b/src/tests/requests/create_slice_1.json
new file mode 100644
index 0000000000000000000000000000000000000000..bcefe01525f7413a98f89c8fa5266b7c6cec8329
--- /dev/null
+++ b/src/tests/requests/create_slice_1.json
@@ -0,0 +1,81 @@
+{
+    "ietf-network-slice-service:network-slice-services": {
+        "slo-sle-templates": {
+            "slo-sle-template": [
+                {
+                    "id": "LOW-DELAY",
+                    "description": "optical-slice",
+                    "slo-policy": {
+                        "metric-bound": [
+                            {
+                                "metric-type": "two-way-delay-maximum",
+                                "metric-unit": "milliseconds",
+                                "bound": 2
+                            }
+                        ]
+                    }
+                }
+            ]
+        },
+        "slice-service": [
+            {
+                "id": "slice-long",
+                "description": "Slice tolerant to intermediate hops",
+                "slo-sle-policy": {
+                    "slo-sle-template": "LOW-DELAY"
+                },
+                "sdps": {
+                    "sdp": [
+                        {
+                            "id": "Ethernet110",
+                            "node-id": "Phoenix-1",
+                            "attachment-circuits": {
+                                "attachment-circuit": [
+                                    {
+                                        "id": "ac-r1",
+                                        "ac-ipv4-address": "10.10.1.1",
+                                        "ac-ipv4-prefix-length": 24
+                                    }
+                                ]
+                            }
+                        },
+                        {
+                            "id": "Ethernet220",
+                            "node-id": "Phoenix-2",
+                            "attachment-circuits": {
+                                "attachment-circuit": [
+                                    {
+                                        "id": "ac-r2",
+                                        "ac-ipv4-address": "10.10.2.1",
+                                        "ac-ipv4-prefix-length": 24
+                                    }
+                                ]
+                            }
+                        }
+                    ]
+                },
+                "connection-groups": {
+                    "connection-group": [
+                        {
+                            "id": "cg-long",
+                            "connectivity-type": "ietf-vpn-common:any-to-any",
+                            "connectivity-construct": [
+                                {
+                                    "id": "cc-long",
+                                    "a2a-sdp": [
+                                        {
+                                            "sdp-id": "Ethernet110"
+                                        },
+                                        {
+                                            "sdp-id": "Ethernet220"
+                                        }
+                                    ]
+                                }
+                            ]
+                        }
+                    ]
+                }
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/src/tests/requests/ietf_green_request.json b/src/tests/requests/ietf_green_request.json
new file mode 100644
index 0000000000000000000000000000000000000000..5edae753b82526b19b865cc8c834260d580679dd
--- /dev/null
+++ b/src/tests/requests/ietf_green_request.json
@@ -0,0 +1,172 @@
+{
+  "ietf-network-slice-service:network-slice-services": {
+    "slo-sle-templates": {
+      "slo-sle-template": [
+        {
+          "id": "B",
+          "description": "",
+          "slo-policy": {
+            "metric-bound": [
+              {
+                "metric-type": "energy_consumption",
+                "metric-unit": "kWh",
+                "bound": 20200
+              },
+              {
+                "metric-type": "energy_efficiency",
+                "metric-unit": "Watts/bps",
+                "bound": 6
+              },
+              {
+                "metric-type": "carbon_emission",
+                "metric-unit": "grams of CO2 per kWh",
+                "bound": 750
+              },
+              {
+                "metric-type": "renewable_energy_usage",
+                "metric-unit": "rate",
+                "bound": 0.5
+              }
+            ]
+          },
+          "sle-policy": {
+            "security": "",
+            "isolation": "",
+            "path-constraints": {
+              "service-functions": "",
+              "diversity": {
+                "diversity": {
+                  "diversity-type": ""
+                }
+              }
+            }
+          }
+        }
+      ]
+    },
+    "slice-service": [
+      {
+        "id": "slice-service-88a585f7-a432-4312-8774-6210fb0b2342",
+        "description": "Transport network slice mapped with 3GPP slice NetworkSlice1",
+        "service-tags": {
+            "tag-type": [
+              {
+                "tag-type": "service",
+                "tag-type-value": [
+                  "L2"
+                ]
+              }
+            ]
+        },
+        "slo-sle-policy": {
+          "slo-sle-template": "B"
+        },
+        "status": {},
+        "sdps": {
+          "sdp": [
+            {
+              "id": "A",
+              "geo-location": "",
+              "node-id": "CU-N32",
+              "sdp-ip-address": "10.60.11.3",
+              "tp-ref": "",
+              "service-match-criteria": {
+                "match-criterion": [
+                  {
+                    "index": 1,
+                    "match-type": "VLAN",
+                    "value": "101",
+                    "target-connection-group-id": "CU-N32_UPF-N32"
+                  }
+                ]
+              },
+              "incoming-qos-policy": "",
+              "outgoing-qos-policy": "",
+              "sdp-peering": {
+                "peer-sap-id": "",
+                "protocols": ""
+              },
+              "ac-svc-ref": [],
+              "attachment-circuits": {
+                "attachment-circuit": [
+                  {
+                    "id": "100",
+                    "ac-ipv4-address": "10.60.11.3",
+                    "ac-ipv4-prefix-length": 0,
+                    "sdp-peering": {
+                      "peer-sap-id": "4.4.4.4"
+                    },
+                    "status": {}
+                  }
+                ]
+              },
+              "status": {},
+              "sdp-monitoring": ""
+            },
+            {
+              "id": "B",
+              "geo-location": "",
+              "node-id": "UPF-N32",
+              "sdp-ip-address": "10.60.10.6",
+              "tp-ref": "",
+              "service-match-criteria": {
+                "match-criterion": [
+                  {
+                    "index": 1,
+                    "match-type": "VLAN",
+                    "value": "101",
+                    "target-connection-group-id": "CU-N32_UPF-N32"
+                  }
+                ]
+              },
+              "incoming-qos-policy": "",
+              "outgoing-qos-policy": "",
+              "sdp-peering": {
+                "peer-sap-id": "",
+                "protocols": ""
+              },
+              "ac-svc-ref": [],
+              "attachment-circuits": {
+                "attachment-circuit": [
+                  {
+                    "id": "200",
+                    "ac-ipv4-address": "10.60.10.6",
+                    "ac-ipv4-prefix-length": 0,
+                    "sdp-peering": {
+                      "peer-sap-id": "5.5.5.5"
+                    },
+                    "status": {}
+                  }
+                ]
+              },
+              "status": {},
+              "sdp-monitoring": ""
+            }
+          ]
+        },
+        "connection-groups": {
+          "connection-group": [
+            {
+              "id": "CU-N32_UPF-N32",
+              "connectivity-type": "ietf-vpn-common:any-to-any",
+              "connectivity-construct": [
+                {
+                  "id": 1,
+                  "a2a-sdp": [
+                    {
+                      "sdp-id": "A"
+                    },
+                    {
+                      "sdp-id": "B"
+                    }
+                  ]
+                }
+              ],
+              "status": {}
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/l3vpn_test.json b/src/tests/requests/l3vpn_test.json
new file mode 100644
index 0000000000000000000000000000000000000000..4564739ad6eea215a0a5b8892a597e110f706adc
--- /dev/null
+++ b/src/tests/requests/l3vpn_test.json
@@ -0,0 +1,164 @@
+{
+    "ietf-network-slice-service:network-slice-services": {
+      "slo-sle-templates": {
+        "slo-sle-template": [
+          {
+            "id": "A",
+            "description": "",
+            "slo-policy": {
+              "metric-bound": [
+                {
+                  "metric-type": "one-way-bandwidth",
+                  "metric-unit": "kbps",
+                  "bound": 20000000.67
+                },
+                {
+                  "metric-type": "one-way-delay-maximum",
+                  "metric-unit": "milliseconds",
+                  "bound": 5.5
+                }
+              ],
+            "availability": 95,
+            "mtu": 1450
+            },
+            "sle-policy": {
+              "security": "",
+              "isolation": "",
+              "path-constraints": {
+                "service-functions": "",
+                "diversity": {
+                  "diversity": {
+                    "diversity-type": ""
+                  }
+                }
+              }
+            }
+          }
+        ]
+      },
+      "slice-service": [
+        {
+          "id": "slice-service-91327140-7361-41b3-aa45-e84a7fb40b79",
+          "description": "Transport network slice mapped with 3GPP slice NetworkSlice1",
+          "service-tags": {
+             "tag-type": [
+               {
+                 "tag-type": "service",
+                 "tag-type-value": [
+                   "L3"
+                 ]
+               }
+             ]
+          },
+          "slo-sle-policy": {
+            "slo-sle-template": "A"
+          },
+          "status": {},
+          "sdps": {
+            "sdp": [
+              {
+                "id": "01",
+                "geo-location": "",
+                "node-id": "CU-N2",
+                "sdp-ip-address": "10.60.11.3",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "100",
+                      "ac-ipv4-address": "10.60.11.3",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "1.1.1.1"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              },
+              {
+                "id": "02",
+                "geo-location": "",
+                "node-id": "AMF-N2",
+                "sdp-ip-address": "10.60.60.105",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "200",
+                      "ac-ipv4-address": "10.60.60.105",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "3.3.3.3"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              }
+            ]
+          },
+          "connection-groups": {
+            "connection-group": [
+              {
+                "id": "CU-N2_AMF-N2",
+                "connectivity-type": "ietf-vpn-common:any-to-any",
+                "connectivity-construct": [
+                  {
+                    "id": 1,
+                    "a2a-sdp": [
+                      {
+                        "sdp-id": "01"
+                      },
+                      {
+                        "sdp-id": "02"
+                      }
+                    ]
+                  }
+                ],
+                "status": {}
+              }
+            ]
+          }
+        }
+      ]
+    }
+  }
\ No newline at end of file
diff --git a/src/tests/requests/slice_request.json b/src/tests/requests/slice_request.json
new file mode 100644
index 0000000000000000000000000000000000000000..f2150783ae098dc5e9511a986eb60c04f046282f
--- /dev/null
+++ b/src/tests/requests/slice_request.json
@@ -0,0 +1,162 @@
+{
+    "ietf-network-slice-service:network-slice-services": {
+      "slo-sle-templates": {
+        "slo-sle-template": [
+          {
+            "id": "A",
+            "description": "",
+            "slo-policy": {
+              "metric-bound": [
+                {
+                  "metric-type": "one-way-bandwidth",
+                  "metric-unit": "kbps",
+                  "bound": 2000
+                },
+                {
+                  "metric-type": "one-way-delay-maximum",
+                  "metric-unit": "milliseconds",
+                  "bound": 5
+                }
+              ]
+            },
+            "sle-policy": {
+              "security": "",
+              "isolation": "",
+              "path-constraints": {
+                "service-functions": "",
+                "diversity": {
+                  "diversity": {
+                    "diversity-type": ""
+                  }
+                }
+              }
+            }
+          }
+        ]
+      },
+      "slice-service": [
+        {
+          "id": "slice-service-11327140-7361-41b3-aa45-e84a7fb40be9",
+          "description": "Transport network slice mapped with 3GPP slice NetworkSlice1",
+          "service-tags": {
+             "tag-type": [
+               {
+                 "tag-type": "service",
+                 "tag-type-value": [
+                   "L2"
+                 ]
+               }
+             ]
+          },
+          "slo-sle-policy": {
+            "slo-sle-template": "A"
+          },
+          "status": {},
+          "sdps": {
+            "sdp": [
+              {
+                "id": "01",
+                "geo-location": "",
+                "node-id": "CU-N2",
+                "sdp-ip-address": "10.60.11.3",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "100",
+                      "ac-ipv4-address": "10.60.11.3",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "1.1.1.1"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              },
+              {
+                "id": "02",
+                "geo-location": "",
+                "node-id": "AMF-N2",
+                "sdp-ip-address": "10.60.60.105",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "200",
+                      "ac-ipv4-address": "10.60.60.105",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "3.3.3.3"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              }
+            ]
+          },
+          "connection-groups": {
+            "connection-group": [
+              {
+                "id": "CU-N2_AMF-N2",
+                "connectivity-type": "ietf-vpn-common:any-to-any",
+                "connectivity-construct": [
+                  {
+                    "id": 1,
+                    "a2a-sdp": [
+                      {
+                        "sdp-id": "01"
+                      },
+                      {
+                        "sdp-id": "02"
+                      }
+                    ]
+                  }
+                ],
+                "status": {}
+              }
+            ]
+          }
+        }
+      ]
+    }
+  }
\ No newline at end of file
diff --git a/src/tests/test_api.py b/src/tests/test_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..7264ab9b0ce115c8c318efda6e078334c6cb76b4
--- /dev/null
+++ b/src/tests/test_api.py
@@ -0,0 +1,301 @@
+import json
+import pytest
+import os
+from unittest.mock import patch, Mock, MagicMock
+from pathlib import Path
+from dotenv import load_dotenv
+import sqlite3
+import time
+from flask import Flask
+from src.main import NSController
+from src.api.main import Api
+
+
+# Load environment variables
+load_dotenv()
+
+@pytest.fixture(scope="session")
+def flask_app():
+    """Crea una app Flask mínima para los tests."""
+    app = Flask(__name__)
+    app.config.update({
+        "TESTING": True,
+        "SERVER_NAME": "localhost",
+        'NRP_ENABLED': os.getenv('NRP_ENABLED', 'False').lower() == 'true',
+        'PLANNER_ENABLED': os.getenv('PLANNER_ENABLED', 'False').lower() == 'true',
+        'PCE_EXTERNAL': os.getenv('PCE_EXTERNAL', 'False').lower() == 'true',
+        'DUMMY_MODE': os.getenv('DUMMY_MODE', 'True').lower() == 'true',
+        'DUMP_TEMPLATES': os.getenv('DUMP_TEMPLATES', 'False').lower() == 'true',
+        'TFS_L2VPN_SUPPORT': os.getenv('TFS_L2VPN_SUPPORT', 'False').lower() == 'true',
+        'WEBUI_DEPLOY': os.getenv('WEBUI_DEPLOY', 'True').lower() == 'true',
+        'UPLOAD_TYPE': os.getenv('UPLOAD_TYPE', 'WEBUI'),
+        'PLANNER_TYPE': os.getenv('PLANNER_TYPE', 'ENERGY'),
+        'HRAT_IP' : os.getenv('HRAT_IP', '10.0.0.1'),
+        'OPTICAL_PLANNER_IP' : os.getenv('OPTICAL_PLANNER_IP', '10.0.0.1')
+    })
+    return app
+
+
+@pytest.fixture(autouse=True)
+def push_flask_context(flask_app):
+    """Empuja automáticamente un contexto Flask para cada test."""
+    with flask_app.app_context():
+        yield
+
+@pytest.fixture
+def temp_db(tmp_path):
+    """Fixture to create and cleanup test database using SQLite instead of JSON."""
+    test_db_name = str(tmp_path / "test_slice.db")
+    
+    # Create database with proper schema
+    conn = sqlite3.connect(test_db_name)
+    cursor = conn.cursor()
+    cursor.execute("""
+        CREATE TABLE IF NOT EXISTS slice (
+            slice_id TEXT PRIMARY KEY,
+            intent TEXT NOT NULL,
+            controller TEXT NOT NULL
+        )
+    """)
+    conn.commit()
+    conn.close()
+    
+    yield test_db_name
+    
+    # Cleanup - properly close connections and remove file
+    try:
+        time.sleep(0.1)
+        if os.path.exists(test_db_name):
+            os.remove(test_db_name)
+    except Exception:
+        time.sleep(0.5)
+        try:
+            if os.path.exists(test_db_name):
+                os.remove(test_db_name)
+        except:
+            pass
+
+
+@pytest.fixture
+def env_variables():
+    """Fixture to load and provide environment variables."""
+    env_vars = {
+        'NRP_ENABLED': os.getenv('NRP_ENABLED', 'False').lower() == 'true',
+        'PLANNER_ENABLED': os.getenv('PLANNER_ENABLED', 'False').lower() == 'true',
+        'PCE_EXTERNAL': os.getenv('PCE_EXTERNAL', 'False').lower() == 'true',
+        'DUMMY_MODE': os.getenv('DUMMY_MODE', 'True').lower() == 'true',
+        'DUMP_TEMPLATES': os.getenv('DUMP_TEMPLATES', 'False').lower() == 'true',
+        'TFS_L2VPN_SUPPORT': os.getenv('TFS_L2VPN_SUPPORT', 'False').lower() == 'true',
+        'WEBUI_DEPLOY': os.getenv('WEBUI_DEPLOY', 'True').lower() == 'true',
+        'UPLOAD_TYPE': os.getenv('UPLOAD_TYPE', 'WEBUI'),
+        'PLANNER_TYPE': os.getenv('PLANNER_TYPE', 'standard'),
+    }
+    return env_vars
+
+
+@pytest.fixture
+def controller_with_mocked_db(temp_db):
+    """Yield an NSController whose database module points at the temp SQLite file."""
+    # Patch the module-level DB_NAME so every db helper hits the temp database.
+    with patch('src.database.db.DB_NAME', temp_db):
+        yield NSController(controller_type="TFS")
+
+
+@pytest.fixture
+def ietf_intent():
+    """Valid IETF network-slice intent: one SLO template and two VLAN-matched SDPs."""
+    # Shape follows the ietf-network-slice-service model: "slo-sle-templates"
+    # holds QoS profiles, "slice-service" holds the slice with its SDPs.
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {
+                        "id": "qos1",
+                        "slo-policy": {
+                            "metric-bound": [
+                                {
+                                    "metric-type": "one-way-bandwidth",
+                                    "metric-unit": "kbps",
+                                    "bound": 1000
+                                }
+                            ]
+                        }
+                    }
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "slice-test-1",
+                    "sdps": {
+                        "sdp": [
+                            {
+                                "sdp-ip-address": "10.0.0.1",
+                                "node-id": "node1",
+                                "service-match-criteria": {
+                                    "match-criterion": [
+                                        {
+                                            "match-type": "vlan",
+                                            "value": "100"
+                                        }
+                                    ]
+                                },
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "sdp-peering": {
+                                                "peer-sap-id": "R1"
+                                            }
+                                        }
+                                    ]
+                                },
+                            },
+                            {
+                                "sdp-ip-address": "10.0.0.2",
+                                "node-id": "node2",
+                                "service-match-criteria": {
+                                    "match-criterion": [
+                                        {
+                                            "match-type": "vlan",
+                                            "value": "100"
+                                        }
+                                    ]
+                                },
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "sdp-peering": {
+                                                "peer-sap-id": "R2"
+                                            }
+                                        }
+                                    ]
+                                },
+                            },
+                        ]
+                    },
+                    "service-tags": {"tag-type": {"value": "L3VPN"}},
+                }
+            ],
+        }
+    }
+
+
+class TestBasicApiOperations:
+    """Tests for basic API operations."""
+    
+    def test_get_flows_empty(self, controller_with_mocked_db):
+        """Debe devolver error cuando no hay slices."""
+        result, code = Api(controller_with_mocked_db).get_flows()
+        assert code == 404
+        assert result["success"] is False
+        assert result["data"] is None
+    
+    def test_add_flow_success(self, controller_with_mocked_db, ietf_intent):
+        """Debe poder añadir un flow exitosamente."""
+        with patch('src.database.db.save_data') as mock_save:
+            result, code = Api(controller_with_mocked_db).add_flow(ietf_intent)
+            assert code == 201
+            assert result["success"] is True
+            assert "slices" in result["data"]
+    
+    def test_add_and_get_flow(self, controller_with_mocked_db, ietf_intent):
+        """Debe poder añadir un flow y luego recuperarlo."""
+        with patch('src.database.db.save_data') as mock_save, \
+             patch('src.database.db.get_all_data') as mock_get_all:
+            
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            
+            mock_get_all.return_value = [
+                {
+                    "slice_id": "slice-test-1",
+                    "intent": ietf_intent,
+                    "controller": "TFS"
+                }
+            ]
+            
+            flows, code = Api(controller_with_mocked_db).get_flows()
+            assert code == 200
+            assert any(s["slice_id"] == "slice-test-1" for s in flows)
+    
+    def test_modify_flow_success(self, controller_with_mocked_db, ietf_intent):
+        """Debe poder modificar un flow existente."""
+        with patch('src.database.db.update_data') as mock_update:
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            new_intent = ietf_intent.copy()
+            new_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = "qos2"
+            
+            result, code = Api(controller_with_mocked_db).modify_flow("slice-test-1", new_intent)
+            print(result)
+            assert code == 200
+            assert result["success"] is True
+    
+    def test_delete_specific_flow_success(self, controller_with_mocked_db, ietf_intent):
+        """Debe borrar un flow concreto."""
+        with patch('src.database.db.delete_data') as mock_delete:
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            result, code = Api(controller_with_mocked_db).delete_flows("slice-test-1")
+            assert code == 204
+            assert result == {}
+    
+    def test_delete_all_flows_success(self, controller_with_mocked_db):
+        """Debe borrar todos los flows."""
+        with patch('src.database.db.delete_all_data') as mock_delete_all:
+            result, code = Api(controller_with_mocked_db).delete_flows()
+            assert code == 204
+            assert result == {}
+    
+    def test_get_specific_flow(self, controller_with_mocked_db, ietf_intent):
+        """Debe poder recuperar un flow específico."""
+        with patch('src.database.db.get_data') as mock_get:
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            mock_get.return_value = {
+                "slice_id": "slice-test-1",
+                "intent": ietf_intent,
+                "controller": "TFS"
+            }
+            
+            result, code = Api(controller_with_mocked_db).get_flows("slice-test-1")
+            assert code == 200
+            assert result["slice_id"] == "slice-test-1"
+
+
+class TestErrorHandling:
+    """Tests for error handling."""
+    
+    def test_add_flow_with_empty_intent(self, controller_with_mocked_db):
+        """Adding an empty intent must fail."""
+        result, code = Api(controller_with_mocked_db).add_flow({})
+        # Exact status depends on where validation trips; any of these
+        # client/server error codes counts as a rejection.
+        assert code in (400, 404, 500)
+        assert result["success"] is False
+    
+    def test_add_flow_with_none(self, controller_with_mocked_db):
+        """Adding None as the intent must fail."""
+        result, code = Api(controller_with_mocked_db).add_flow(None)
+        assert code in (400, 500)
+        assert result["success"] is False
+    
+    def test_get_nonexistent_slice(self, controller_with_mocked_db):
+        """Requesting a nonexistent slice must return 404."""
+        with patch('src.database.db.get_data') as mock_get:
+            # The db layer signals "not found" with ValueError; the API is
+            # expected to translate that into a 404 response.
+            mock_get.side_effect = ValueError("No slice found")
+            
+            result, code = Api(controller_with_mocked_db).get_flows("slice-does-not-exist")
+            assert code == 404
+            assert result["success"] is False
+    
+    def test_modify_nonexistent_flow(self, controller_with_mocked_db, ietf_intent):
+        """Modifying a nonexistent flow must fail with 404."""
+        with patch('src.database.db.update_data') as mock_update:
+            mock_update.side_effect = ValueError("No slice found")
+            
+            result, code = Api(controller_with_mocked_db).modify_flow("nonexistent", ietf_intent)
+            assert code == 404
+            assert result["success"] is False
+    
+    def test_delete_nonexistent_flow(self, controller_with_mocked_db):
+        """Deleting a nonexistent flow must fail with 404."""
+        with patch('src.database.db.delete_data') as mock_delete:
+            mock_delete.side_effect = ValueError("No slice found")
+            
+            result, code = Api(controller_with_mocked_db).delete_flows("nonexistent")
+            assert code == 404
+            assert result["success"] is False
+
+
diff --git a/src/tests/test_database.py b/src/tests/test_database.py
new file mode 100644
index 0000000000000000000000000000000000000000..06034eb6be1ea730e75878d6aba337c515093d83
--- /dev/null
+++ b/src/tests/test_database.py
@@ -0,0 +1,585 @@
+import pytest
+import sqlite3
+import json
+import os
+import time
+from unittest.mock import patch, MagicMock
+from src.database.db import (
+    init_db,
+    save_data,
+    update_data,
+    delete_data,
+    get_data,
+    get_all_data,
+    delete_all_data,
+    DB_NAME
+)
+from src.database.store_data import store_data
+
+
+@pytest.fixture
+def test_db(tmp_path):
+    """Fixture to create and cleanup test database."""
+    test_db_name = str(tmp_path / "test_slice.db")
+    
+    # Use test database
+    with patch('src.database.db.DB_NAME', test_db_name):
+        conn = sqlite3.connect(test_db_name)
+        cursor = conn.cursor()
+        cursor.execute("""
+            CREATE TABLE IF NOT EXISTS slice (
+                slice_id TEXT PRIMARY KEY,
+                intent TEXT NOT NULL,
+                controller TEXT NOT NULL
+            )
+        """)
+        conn.commit()
+        conn.close()
+        
+        yield test_db_name
+        
+        # Cleanup - Close all connections and remove file
+        try:
+            # Force SQLite to release locks
+            sqlite3.connect(':memory:').execute('VACUUM').close()
+            
+            # Wait a moment for file locks to release
+            import time
+            time.sleep(0.1)
+            
+            # Remove the file if it exists
+            if os.path.exists(test_db_name):
+                os.remove(test_db_name)
+        except Exception as e:
+            # On Windows, sometimes files are locked. Try again after a delay
+            import time
+            time.sleep(0.5)
+            try:
+                if os.path.exists(test_db_name):
+                    os.remove(test_db_name)
+            except:
+                pass  # If it still fails, let pytest's tmp_path cleanup handle it
+
+
+@pytest.fixture
+def sample_intent():
+    """Fixture providing sample network slice intent."""
+    # Minimal but realistic IETF network-slice payload: one slice-service with
+    # a single SDP, plus one SLO template bounding one-way bandwidth.
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slice-service": [{
+                "id": "slice-service-12345",
+                "description": "Test network slice",
+                "service-tags": {"tag-type": {"value": "L2VPN"}},
+                "sdps": {
+                    "sdp": [{
+                        "node-id": "node1",
+                        "sdp-ip-address": "10.0.0.1"
+                    }]
+                }
+            }],
+            "slo-sle-templates": {
+                "slo-sle-template": [{
+                    "id": "profile1",
+                    "slo-policy": {
+                        "metric-bound": [{
+                            "metric-type": "one-way-bandwidth",
+                            "metric-unit": "kbps",
+                            "bound": 1000
+                        }]
+                    }
+                }]
+            }
+        }
+    }
+
+
+@pytest.fixture
+def simple_intent():
+    """Fixture providing simple intent for basic testing."""
+    # Deliberately flat: the db layer stores intents as opaque JSON, so this
+    # shape exercises persistence without the full IETF structure.
+    return {
+        "bandwidth": "1Gbps",
+        "latency": "10ms",
+        "provider": "opensec"
+    }
+
+
+class TestInitDb:
+    """Tests for database initialization."""
+    
+    def test_init_db_creates_table(self, tmp_path):
+        """Test that init_db creates the slice table."""
+        test_db = str(tmp_path / "test.db")
+        
+        with patch('src.database.db.DB_NAME', test_db):
+            init_db()
+            
+            # Verify via sqlite_master that the table actually exists.
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='slice'")
+            result = cursor.fetchone()
+            conn.close()
+            time.sleep(0.05)  # Brief pause for file lock release
+            
+            assert result is not None
+            assert result[0] == 'slice'
+    
+    def test_init_db_creates_correct_columns(self, tmp_path):
+        """Test that init_db creates table with correct columns."""
+        test_db = str(tmp_path / "test.db")
+        
+        with patch('src.database.db.DB_NAME', test_db):
+            init_db()
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            # PRAGMA table_info rows are (cid, name, type, notnull, dflt_value, pk).
+            cursor.execute("PRAGMA table_info(slice)")
+            columns = cursor.fetchall()
+            conn.close()
+            time.sleep(0.05)
+            
+            column_names = [col[1] for col in columns]
+            assert "slice_id" in column_names
+            assert "intent" in column_names
+            assert "controller" in column_names
+    
+    def test_init_db_idempotent(self, tmp_path):
+        """Test that init_db can be called multiple times without error."""
+        test_db = str(tmp_path / "test.db")
+        
+        with patch('src.database.db.DB_NAME', test_db):
+            init_db()
+            init_db()  # Should not raise error
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='slice'")
+            result = cursor.fetchone()
+            conn.close()
+            time.sleep(0.05)
+            
+            assert result is not None
+
+
+class TestSaveData:
+    """Tests for save_data function."""
+    
+    def test_save_data_success(self, test_db, simple_intent):
+        """Test successful data saving."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            # Row layout: (slice_id, intent-as-JSON-text, controller).
+            assert result is not None
+            assert result[0] == "slice-001"
+            assert result[2] == "TFS"
+            assert json.loads(result[1]) == simple_intent
+    
+    def test_save_data_with_complex_intent(self, test_db, sample_intent):
+        """Test saving complex nested intent structure."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT intent FROM slice WHERE slice_id = ?", (slice_id,))
+            result = cursor.fetchone()
+            conn.close()
+            
+            # A JSON round-trip must preserve the nested structure exactly.
+            retrieved_intent = json.loads(result[0])
+            assert retrieved_intent == sample_intent
+    
+    def test_save_data_duplicate_slice_id_raises_error(self, test_db, simple_intent):
+        """Test that saving duplicate slice_id raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            
+            with pytest.raises(ValueError, match="already exists"):
+                save_data("slice-001", simple_intent, "TFS")
+    
+    def test_save_data_multiple_slices(self, test_db, simple_intent):
+        """Test saving multiple different slices."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT COUNT(*) FROM slice")
+            count = cursor.fetchone()[0]
+            conn.close()
+            
+            assert count == 2
+    
+    def test_save_data_with_different_controllers(self, test_db, simple_intent):
+        """Test saving data with different controller types."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-tfs", simple_intent, "TFS")
+            save_data("slice-ixia", simple_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-tfs",))
+            tfs_result = cursor.fetchone()
+            cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-ixia",))
+            ixia_result = cursor.fetchone()
+            conn.close()
+            
+            assert tfs_result[0] == "TFS"
+            assert ixia_result[0] == "IXIA"
+
+
+class TestUpdateData:
+    """Tests for update_data function."""
+    
+    def test_update_data_success(self, test_db, simple_intent):
+        """Test successful data update."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            
+            updated_intent = {"bandwidth": "2Gbps", "latency": "5ms", "provider": "opensec"}
+            update_data("slice-001", updated_intent, "TFS")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT intent FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            retrieved_intent = json.loads(result[0])
+            assert retrieved_intent == updated_intent
+    
+    def test_update_data_nonexistent_slice_raises_error(self, test_db, simple_intent):
+        """Test that updating nonexistent slice raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            with pytest.raises(ValueError, match="No slice found"):
+                update_data("nonexistent-slice", simple_intent, "TFS")
+    
+    def test_update_data_controller_type(self, test_db, simple_intent):
+        """Test updating controller type."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            update_data("slice-001", simple_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            assert result[0] == "IXIA"
+    
+    def test_update_data_complex_intent(self, test_db, sample_intent):
+        """Test updating with complex nested structure."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "TFS")
+            
+            # NOTE(review): .copy() is shallow, so the nested mutation below also
+            # alters the fixture's dict; harmless here because the fixture is
+            # function-scoped, but a deepcopy would be safer.
+            updated_sample = sample_intent.copy()
+            updated_sample["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = "Updated description"
+            
+            update_data(slice_id, updated_sample, "IXIA")
+            
+            retrieved = get_data(slice_id)
+            assert retrieved["intent"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] == "Updated description"
+            assert retrieved["controller"] == "IXIA"
+
+
+class TestDeleteData:
+    """Tests for delete_data function."""
+    
+    def test_delete_data_success(self, test_db, simple_intent):
+        """Test successful data deletion."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            delete_data("slice-001")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            # The row must be gone after deletion.
+            assert result is None
+    
+    def test_delete_data_nonexistent_slice_raises_error(self, test_db):
+        """Test that deleting nonexistent slice raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            with pytest.raises(ValueError, match="No slice found"):
+                delete_data("nonexistent-slice")
+    
+    def test_delete_data_multiple_slices(self, test_db, simple_intent):
+        """Test deleting one slice doesn't affect others."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            
+            delete_data("slice-001")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT COUNT(*) FROM slice")
+            count = cursor.fetchone()[0]
+            cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-002",))
+            remaining = cursor.fetchone()
+            conn.close()
+            
+            # Only the untouched slice remains; remaining[0] is its slice_id.
+            assert count == 1
+            assert remaining[0] == "slice-002"
+
+
+class TestGetData:
+    """Tests for get_data function."""
+    
+    def test_get_data_success(self, test_db, simple_intent):
+        """Test retrieving existing data."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            result = get_data("slice-001")
+            
+            # get_data returns a dict with the intent already JSON-decoded.
+            assert result["slice_id"] == "slice-001"
+            assert result["intent"] == simple_intent
+            assert result["controller"] == "TFS"
+    
+    def test_get_data_nonexistent_raises_error(self, test_db):
+        """Test that getting nonexistent slice raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            with pytest.raises(ValueError, match="No slice found"):
+                get_data("nonexistent-slice")
+    
+    def test_get_data_json_parsing(self, test_db, sample_intent):
+        """Test that returned intent is parsed JSON."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "TFS")
+            result = get_data(slice_id)
+            
+            assert isinstance(result["intent"], dict)
+            assert result["intent"] == sample_intent
+    
+    def test_get_data_returns_all_fields(self, test_db, simple_intent):
+        """Test that get_data returns all fields."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            result = get_data("slice-001")
+            
+            # Exactly three keys: slice_id, intent, controller — nothing extra.
+            assert "slice_id" in result
+            assert "intent" in result
+            assert "controller" in result
+            assert len(result) == 3
+
+
+class TestGetAllData:
+    """Tests for get_all_data function."""
+    
+    def test_get_all_data_empty_database(self, test_db):
+        """Test retrieving all data from empty database."""
+        with patch('src.database.db.DB_NAME', test_db):
+            # An empty database yields an empty list, not an error.
+            result = get_all_data()
+            assert result == []
+    
+    def test_get_all_data_single_slice(self, test_db, simple_intent):
+        """Test retrieving all data with single slice."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            result = get_all_data()
+            
+            assert len(result) == 1
+            assert result[0]["slice_id"] == "slice-001"
+            assert result[0]["intent"] == simple_intent
+    
+    def test_get_all_data_multiple_slices(self, test_db, simple_intent):
+        """Test retrieving all data with multiple slices."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            save_data("slice-003", simple_intent, "TFS")
+            
+            result = get_all_data()
+            
+            # Membership check only: row ordering is not part of the contract.
+            assert len(result) == 3
+            slice_ids = [slice_data["slice_id"] for slice_data in result]
+            assert "slice-001" in slice_ids
+            assert "slice-002" in slice_ids
+            assert "slice-003" in slice_ids
+    
+    def test_get_all_data_json_parsing(self, test_db, sample_intent):
+        """Test that all returned intents are parsed JSON."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "TFS")
+            save_data("slice-002", sample_intent, "IXIA")
+            
+            result = get_all_data()
+            
+            for slice_data in result:
+                assert isinstance(slice_data["intent"], dict)
+    
+    def test_get_all_data_includes_all_controllers(self, test_db, simple_intent):
+        """Test that get_all_data includes slices from different controllers."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-tfs", simple_intent, "TFS")
+            save_data("slice-ixia", simple_intent, "IXIA")
+            
+            result = get_all_data()
+            
+            controllers = [slice_data["controller"] for slice_data in result]
+            assert "TFS" in controllers
+            assert "IXIA" in controllers
+
+
+class TestDeleteAllData:
+    """Tests for delete_all_data function."""
+    
+    def test_delete_all_data_removes_all_slices(self, test_db, simple_intent):
+        """Test that delete_all_data removes all slices."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            
+            delete_all_data()
+            
+            result = get_all_data()
+            assert result == []
+    
+    def test_delete_all_data_empty_database(self, test_db):
+        """Test delete_all_data on empty database doesn't raise error."""
+        with patch('src.database.db.DB_NAME', test_db):
+            delete_all_data()  # Should not raise error
+            result = get_all_data()
+            assert result == []
+
+
+class TestStoreData:
+    """Tests for store_data wrapper function."""
+    
+    def test_store_data_save_new_slice(self, test_db, sample_intent):
+        """Test store_data saves new slice without slice_id."""
+        with patch('src.database.db.DB_NAME', test_db):
+            # With slice_id=None, store_data is expected to derive the id from
+            # the intent body and create a new record.
+            store_data(sample_intent, None, "TFS")
+            
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            result = get_data(slice_id)
+            
+            assert result["slice_id"] == slice_id
+            assert result["intent"] == sample_intent
+            assert result["controller"] == "TFS"
+    
+    def test_store_data_update_existing_slice(self, test_db, sample_intent):
+        """Test store_data updates existing slice when slice_id provided."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            
+            # Save initial data
+            save_data(slice_id, sample_intent, "TFS")
+            
+            # Update with store_data
+            # NOTE(review): .copy() is shallow — the nested mutation below also
+            # changes sample_intent itself; fine here as fixtures are per-test.
+            updated_intent = sample_intent.copy()
+            updated_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = "Updated"
+            store_data(updated_intent, slice_id, "IXIA")
+            
+            result = get_data(slice_id)
+            assert result["controller"] == "IXIA"
+            assert result["intent"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] == "Updated"
+    
+    def test_store_data_extracts_slice_id_from_intent(self, test_db, sample_intent):
+        """Test store_data correctly extracts slice_id from intent structure."""
+        with patch('src.database.db.DB_NAME', test_db):
+            store_data(sample_intent, None, "TFS")
+            
+            all_data = get_all_data()
+            assert len(all_data) == 1
+            assert all_data[0]["slice_id"] == "slice-service-12345"
+    
+    def test_store_data_with_different_controllers(self, test_db, sample_intent):
+        """Test store_data works with different controller types."""
+        with patch('src.database.db.DB_NAME', test_db):
+            store_data(sample_intent, None, "TFS")
+            
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            result = get_data(slice_id)
+            
+            assert result["controller"] == "TFS"
+
+
+class TestDatabaseIntegration:
+    """Integration tests for database operations."""
+    
+    def test_full_lifecycle_create_read_update_delete(self, test_db, simple_intent):
+        """Test complete slice lifecycle."""
+        with patch('src.database.db.DB_NAME', test_db):
+            # Create
+            save_data("slice-lifecycle", simple_intent, "TFS")
+            
+            # Read
+            result = get_data("slice-lifecycle")
+            assert result["slice_id"] == "slice-lifecycle"
+            
+            # Update
+            updated_intent = {"bandwidth": "5Gbps", "latency": "2ms", "provider": "opensec"}
+            update_data("slice-lifecycle", updated_intent, "IXIA")
+            
+            result = get_data("slice-lifecycle")
+            assert result["intent"] == updated_intent
+            assert result["controller"] == "IXIA"
+            
+            # Delete
+            delete_data("slice-lifecycle")
+            
+            with pytest.raises(ValueError):
+                get_data("slice-lifecycle")
+    
+    def test_concurrent_operations(self, test_db, simple_intent):
+        """Test multiple concurrent database operations."""
+        with patch('src.database.db.DB_NAME', test_db):
+            # Create multiple slices
+            for i in range(5):
+                save_data(f"slice-{i}", simple_intent, "TFS" if i % 2 == 0 else "IXIA")
+            
+            # Verify all created
+            all_data = get_all_data()
+            assert len(all_data) == 5
+            
+            # Update some
+            updated_intent = {"updated": True}
+            for i in range(0, 3):
+                update_data(f"slice-{i}", updated_intent, "TFS")
+            
+            # Verify updates
+            for i in range(0, 3):
+                result = get_data(f"slice-{i}")
+                assert result["intent"]["updated"] is True
+            
+            # Delete some
+            delete_data("slice-0")
+            delete_data("slice-2")
+            
+            all_data = get_all_data()
+            assert len(all_data) == 3
+    
+    def test_data_persistence_across_operations(self, test_db, sample_intent):
+        """Test that data persists correctly across multiple operations."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            
+            # Save
+            save_data(slice_id, sample_intent, "TFS")
+            
+            # Get all and verify
+            all_before = get_all_data()
+            assert len(all_before) == 1
+            
+            # Save another
+            save_data("slice-other", sample_intent, "IXIA")
+            all_after = get_all_data()
+            assert len(all_after) == 2
+            
+            # Verify first slice still intact
+            first_slice = get_data(slice_id)
+            assert first_slice["intent"] == sample_intent
+            assert first_slice["controller"] == "TFS"
\ No newline at end of file
diff --git a/src/tests/test_e2e.py b/src/tests/test_e2e.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb91405568b415b4ef7b5148354fc825e3372a4
--- /dev/null
+++ b/src/tests/test_e2e.py
@@ -0,0 +1,101 @@
+import pytest
+import json
+from pathlib import Path
+from itertools import product
+from src.api.main import Api
+from src.main import NSController
+from app import create_app
+
+# Directory containing the request JSON files
+REQUESTS_DIR = Path(__file__).parent / "requests"
+
+# List of all boolean flags to test
+FLAGS_TO_TEST = ["WEBUI_DEPLOY", "DUMP_TEMPLATES", "PLANNER_ENABLED", "PCE_EXTERNAL", "NRP_ENABLED"]
+
+# Possible values for PLANNER_TYPE
+PLANNER_TYPE_VALUES = ["ENERGY", "HRAT", "TFS_OPTICAL"]
+
+
+@pytest.fixture
+def app(temp_sqlite_db):
+    """Crea la app Flask con configuración por defecto."""
+    app = create_app()
+    return app
+
+@pytest.fixture
+def client(app):
+    """Cliente de test de Flask para hacer requests."""
+    return app.test_client()
+
+@pytest.fixture
+def set_flags(app):
+    """Cambia directamente los flags en app.config"""
+    def _set(flags: dict):
+        for k, v in flags.items():
+            app.config[k] = v
+    return _set
+
+@pytest.fixture
+def temp_sqlite_db(monkeypatch, tmp_path):
+    """Usa una base de datos SQLite temporal durante los tests."""
+    temp_db_path = tmp_path / "test_slice.db"
+    monkeypatch.setattr("src.database.db.DB_NAME", str(temp_db_path))
+
+    # Initialize the temporary database
+    from src.database.db import init_db
+    init_db()
+
+    yield temp_db_path
+
+    # Clean up when finished
+    if temp_db_path.exists():
+        temp_db_path.unlink()
+
+# Load all request JSON files
+def load_request_files():
+    test_cases = []
+    for f in REQUESTS_DIR.glob("*.json"):
+        with open(f, "r") as file:
+            json_data = json.load(file)
+        test_cases.append(json_data)
+    return test_cases
+
+# Generate every combination of flag values
+def generate_flag_combinations():
+    bool_values = [True, False]
+    for combo in product(bool_values, repeat=len(FLAGS_TO_TEST)):
+        bool_flags = dict(zip(FLAGS_TO_TEST, combo)) 
+        for planner_type in PLANNER_TYPE_VALUES:
+            yield {**bool_flags, "PLANNER_TYPE": planner_type}
+
+
+# Combine each request with every flag combination
+def generate_test_cases():
+    requests = load_request_files()
+    for json_data in requests:
+        for flags in generate_flag_combinations():
+            expected_codes = [200,201]
+            yield (json_data, flags, expected_codes)
+
+@pytest.mark.parametrize(
+    "json_data, flags, expected_codes",
+    list(generate_test_cases())
+)
+def test_add_and_delete_flow(app, json_data, flags, expected_codes, set_flags, temp_sqlite_db):
+    with app.app_context():
+        set_flags(flags)
+
+        controller = NSController(controller_type="TFS")
+        api = Api(controller)
+
+        # Add the flow
+        data, code = api.add_flow(json_data)
+        assert code in expected_codes, f"Flags en fallo: {flags}"
+
+        # Delete the flow if it was created
+        if code == 201 and isinstance(data, dict) and "slice_id" in data:
+            slice_id = data["slice_id"]
+            _, delete_code = api.delete_flows(slice_id=slice_id)
+            assert delete_code == 204, f"No se pudo eliminar el slice {slice_id}"
+
+
diff --git a/src/tests/test_initialization.py b/src/tests/test_initialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..c51cc0659a4d409781c1cdebf7b0512f158d38e6
--- /dev/null
+++ b/src/tests/test_initialization.py
@@ -0,0 +1,37 @@
+import pytest
+
+# Import the class under test (adjust the module name if different)
+from src.main import NSController  
+
+def test_init_default_values():
+    """Test that default initialization sets expected values."""
+    controller = NSController()
+
+    # Configurable attribute
+    assert controller.controller_type == "TFS"
+
+    # Internal attributes
+    assert controller.path == ""
+    assert controller.response == []
+    assert controller.start_time == 0
+    assert controller.end_time == 0
+    assert controller.setup_time == 0
+
+@pytest.mark.parametrize("controller_type", ["TFS", "IXIA", "custom"])
+def test_init_controller_type(controller_type):
+    """Test initialization with different controller types."""
+    controller = NSController(controller_type=controller_type)
+    assert controller.controller_type == controller_type
+
+def test_init_independence_between_instances():
+    """Test that each instance has independent state (mutable attrs)."""
+    c1 = NSController()
+    c2 = NSController()
+
+    # Mutate a list on one instance
+    c1.response.append("test-response")
+
+    # The other instance should not be affected
+    assert c2.response == []
+    assert c1.response == ["test-response"]
+
diff --git a/src/tests/test_mapper.py b/src/tests/test_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..219923f0bf70e6920398e945a14205dd913baab9
--- /dev/null
+++ b/src/tests/test_mapper.py
@@ -0,0 +1,639 @@
+import pytest
+import logging
+from unittest.mock import patch, MagicMock, call
+from flask import Flask
+from src.mapper.main import mapper
+from src.mapper.slo_viability import slo_viability
+
+
+@pytest.fixture
+def sample_ietf_intent():
+    """Fixture providing sample IETF network slice intent."""
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slice-service": [{
+                "id": "slice-service-12345",
+                "description": "Test network slice",
+                "service-tags": {"tag-type": {"value": "L2VPN"}}
+            }],
+            "slo-sle-templates": {
+                "slo-sle-template": [{
+                    "id": "profile1",
+                    "slo-policy": {
+                        "metric-bound": [
+                            {
+                                "metric-type": "one-way-bandwidth",
+                                "metric-unit": "kbps",
+                                "bound": 1000
+                            },
+                            {
+                                "metric-type": "one-way-delay-maximum",
+                                "metric-unit": "milliseconds",
+                                "bound": 10
+                            }
+                        ]
+                    }
+                }]
+            }
+        }
+    }
+
+
+@pytest.fixture
+def sample_nrp_view():
+    """Fixture providing sample NRP view."""
+    return [
+        {
+            "id": "nrp-1",
+            "available": True,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 1500
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 8
+                }
+            ]
+        },
+        {
+            "id": "nrp-2",
+            "available": True,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 500
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 15
+                }
+            ]
+        },
+        {
+            "id": "nrp-3",
+            "available": False,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 2000
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 5
+                }
+            ]
+        }
+    ]
+
+
+@pytest.fixture
+def mock_app():
+    """Fixture providing mock Flask app context."""
+    app = Flask(__name__)
+    app.config = {
+        "NRP_ENABLED": False,
+        "PLANNER_ENABLED": False,
+        "SERVER_NAME": "localhost",
+        "APPLICATION_ROOT": "/",
+        "PREFERRED_URL_SCHEME": "http"  
+    }
+    return app
+
+
+@pytest.fixture
+def app_context(mock_app):
+    """Fixture providing Flask application context."""
+    with mock_app.app_context():
+        yield mock_app
+
+
+class TestSloViability:
+    """Tests for slo_viability function."""
+    
+    def test_slo_viability_meets_all_requirements(self):
+        """Test when NRP meets all SLO requirements."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            },
+            {
+                "metric-type": "one-way-delay-maximum",
+                "bound": 10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 1500
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 8
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert score > 0
+    
+    def test_slo_viability_fails_bandwidth_minimum(self):
+        """Test when NRP doesn't meet minimum bandwidth requirement."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 500  # Less than required
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is False
+        assert score == 0
+    
+    def test_slo_viability_fails_delay_maximum(self):
+        """Test when NRP doesn't meet maximum delay requirement."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-delay-maximum",
+                "bound": 10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 15  # Greater than maximum allowed
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is False
+        assert score == 0
+    
+    def test_slo_viability_multiple_metrics_partial_failure(self):
+        """Test when one metric fails in a multi-metric comparison."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            },
+            {
+                "metric-type": "one-way-delay-maximum",
+                "bound": 10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 1500  # OK
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 15  # NOT OK
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is False
+        assert score == 0
+    
+    def test_slo_viability_flexibility_score_calculation(self):
+        """Test flexibility score calculation."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 2000  # 100% better than requirement
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        # Flexibility = (2000 - 1000) / 1000 = 1.0
+        assert score == 1.0
+    
+    def test_slo_viability_empty_slos(self):
+        """Test with empty SLO list."""
+        slice_slos = []
+        nrp_slos = {"slos": []}
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert score == 0
+    
+    def test_slo_viability_no_matching_metrics(self):
+        """Test when there are no matching metric types."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "two-way-bandwidth",
+                    "bound": 1500
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        # Should still return True as no metrics failed
+        assert viable is True
+        assert score == 0
+    
+    def test_slo_viability_packet_loss_maximum_type(self):
+        """Test packet loss as maximum constraint type."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-packet-loss",
+                "bound": 0.01  # 1% maximum acceptable
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-packet-loss",
+                    "bound": 0.005  # 0.5% NRP loss
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert score > 0
+
+
+class TestMapper:
+    """Tests for mapper function."""
+    
+    def test_mapper_with_nrp_disabled_and_planner_disabled(self, app_context, sample_ietf_intent):
+        """Test mapper when both NRP and Planner are disabled."""
+        app_context.config = {
+            "NRP_ENABLED": False,
+            "PLANNER_ENABLED": False
+        }
+        
+        result = mapper(sample_ietf_intent)
+        
+        assert result is None
+    
+    @patch('src.mapper.main.Planner')
+    def test_mapper_with_planner_enabled(self, mock_planner_class, app_context, sample_ietf_intent):
+        """Test mapper when Planner is enabled."""
+        app_context.config = {
+            "NRP_ENABLED": False,
+            "PLANNER_ENABLED": True,
+            "PLANNER_TYPE":"ENERGY"
+        }
+        
+        mock_planner_instance = MagicMock()
+        mock_planner_instance.planner.return_value = {"path": "node1->node2->node3"}
+        mock_planner_class.return_value = mock_planner_instance
+        
+        result = mapper(sample_ietf_intent)
+        
+        assert result == {"path": "node1->node2->node3"}
+        mock_planner_instance.planner.assert_called_once_with(sample_ietf_intent, "ENERGY")
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_enabled_finds_best_nrp(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper with NRP enabled finds the best NRP."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False,
+        }
+        
+        mock_realizer.return_value = sample_nrp_view
+        
+        result = mapper(sample_ietf_intent)
+        
+        # Verify realizer was called to READ NRP view
+        assert mock_realizer.call_args_list[0] == call(None, True, "READ")
+        assert result is None
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_enabled_no_viable_candidates(self, mock_realizer, app_context, sample_ietf_intent):
+        """Test mapper when no viable NRPs are found."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        # All NRPs are unavailable
+        nrp_view = [
+            {
+                "id": "nrp-1",
+                "available": False,
+                "slices": [],
+                "slos": [
+                    {
+                        "metric-type": "one-way-bandwidth",
+                        "bound": 500
+                    }
+                ]
+            }
+        ]
+        
+        mock_realizer.return_value = nrp_view
+        
+        result = mapper(sample_ietf_intent)
+        
+        assert result is None
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_enabled_creates_new_nrp(self, mock_realizer, app_context, sample_ietf_intent):
+        """Test mapper creates new NRP when no suitable candidate exists."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        # No viable NRPs
+        nrp_view = []
+        
+        mock_realizer.side_effect = [nrp_view, None]  # First call returns empty, second for CREATE
+        
+        result = mapper(sample_ietf_intent)
+        
+        # Verify CREATE was called
+        create_call = [c for c in mock_realizer.call_args_list if len(c[0]) > 2 and c[0][2] == "CREATE"]
+        assert len(create_call) > 0
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_and_planner_both_enabled(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper when both NRP and Planner are enabled."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": True,
+            "PLANNER_TYPE":"ENERGY"
+        }
+        
+        mock_realizer.return_value = sample_nrp_view
+        
+        with patch('src.mapper.main.Planner') as mock_planner_class:
+            mock_planner_instance = MagicMock()
+            mock_planner_instance.planner.return_value = {"path": "optimized_path"}
+            mock_planner_class.return_value = mock_planner_instance
+            
+            result = mapper(sample_ietf_intent)
+            
+            # Planner should be called and return the result
+            assert result == {"path": "optimized_path"}
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_updates_best_nrp_with_slice(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper updates best NRP with new slice."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        mock_realizer.return_value = sample_nrp_view
+        
+        result = mapper(sample_ietf_intent)
+        
+        # Verify UPDATE was called
+        update_calls = [c for c in mock_realizer.call_args_list if len(c[0]) > 2 and c[0][2] == "UPDATE"]
+        assert len(update_calls) > 0
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_extracts_slos_correctly(self, mock_realizer, app_context, sample_ietf_intent):
+        """Test that mapper correctly extracts SLOs from intent."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        mock_realizer.return_value = []
+        
+        mapper(sample_ietf_intent)
+        
+        # Verify the function processed the intent
+        assert mock_realizer.called
+    
+    @patch('src.mapper.main.logging')
+    def test_mapper_logs_debug_info(self, mock_logging, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper logs debug information."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        with patch('src.mapper.main.realizer') as mock_realizer:
+            mock_realizer.return_value = sample_nrp_view
+            
+            mapper(sample_ietf_intent)
+            
+            # Verify debug logging was called
+            assert mock_logging.debug.called
+
+
+class TestMapperIntegration:
+    """Integration tests for mapper functionality."""
+    
+    def test_mapper_complete_nrp_workflow(self, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test complete NRP mapping workflow."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        with patch('src.mapper.main.realizer') as mock_realizer:
+            mock_realizer.return_value = sample_nrp_view
+            
+            result = mapper(sample_ietf_intent)
+            
+            # Verify the workflow sequence
+            assert mock_realizer.call_count >= 1
+            first_call = mock_realizer.call_args_list[0]
+            assert first_call[0][1] is True  # need_nrp parameter
+            assert first_call[0][2] == "READ"  # READ operation
+    
+    def test_mapper_complete_planner_workflow(self, app_context, sample_ietf_intent):
+        """Test complete Planner workflow."""
+        app_context.config = {
+            "NRP_ENABLED": False,
+            "PLANNER_ENABLED": True,
+            "PLANNER_TYPE":"ENERGY"
+        }
+        
+        expected_path = {
+            "path": "node1->node2->node3",
+            "cost": 10,
+            "latency": 5
+        }
+        
+        with patch('src.mapper.main.Planner') as mock_planner_class:
+            mock_planner_instance = MagicMock()
+            mock_planner_instance.planner.return_value = expected_path
+            mock_planner_class.return_value = mock_planner_instance
+            
+            result = mapper(sample_ietf_intent)
+            
+            assert result == expected_path
+            mock_planner_instance.planner.assert_called_once()
+    
+    def test_mapper_with_invalid_nrp_response(self, app_context, sample_ietf_intent):
+        """Test mapper behavior with invalid NRP response."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        # Invalid NRP without expected fields
+        invalid_nrp = {
+            "id": "nrp-invalid"
+            # Missing 'available' and 'slos' fields
+        }
+        
+        with patch('src.mapper.main.realizer') as mock_realizer:
+            mock_realizer.return_value = [invalid_nrp]
+            
+            # Should handle gracefully
+            try:
+                result = mapper(sample_ietf_intent)
+            except (KeyError, TypeError):
+                # Expected to fail gracefully
+                pass
+    
+    def test_mapper_with_missing_slos_in_intent(self, app_context):
+        """Test mapper behavior when intent has no SLOs."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        invalid_intent = {
+            "ietf-network-slice-service:network-slice-services": {
+                "slice-service": [{
+                    "id": "slice-1"
+                }],
+                "slo-sle-templates": {
+                    "slo-sle-template": [{
+                        "id": "profile1",
+                        "slo-policy": {
+                            # No metric-bound key
+                        }
+                    }]
+                }
+            }
+        }
+        
+        try:
+            mapper(invalid_intent)
+        except (KeyError, TypeError):
+            # Expected behavior
+            pass
+
+
+class TestSloViabilityEdgeCases:
+    """Edge case tests for slo_viability function."""
+    
+    def test_slo_viability_with_zero_bound(self):
+        """Test handling of zero bounds in SLO."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 0
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 100
+                }
+            ]
+        }
+        
+        # Should handle zero division gracefully or fail as expected
+        try:
+            viable, score = slo_viability(slice_slos, nrp_slos)
+        except (ZeroDivisionError, ValueError):
+            pass
+    
+    def test_slo_viability_with_very_large_bounds(self):
+        """Test handling of very large SLO bounds."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1e10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 2e10
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert isinstance(score, (int, float))
+    
+    def test_slo_viability_all_delay_types(self):
+        """Test handling of all delay metric t ypes."""
+        delay_types = [
+            "one-way-delay-maximum",
+            "two-way-delay-maximum",
+            "one-way-delay-percentile",
+            "two-way-delay-percentile",
+            "one-way-delay-variation-maximum",
+            "two-way-delay-variation-maximum"
+        ]
+        
+        for delay_type in delay_types:
+            slice_slos = [{"metric-type": delay_type, "bound": 10}]
+            nrp_slos = {"slos": [{"metric-type": delay_type, "bound": 8}]}
+            
+            viable, score = slo_viability(slice_slos, nrp_slos)
+            
+            assert viable is True
+            assert score >= 0
\ No newline at end of file
diff --git a/src/tests/test_nbi_processor.py b/src/tests/test_nbi_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef1349425fba9d6756a4b9af122ba58b4a5ff41b
--- /dev/null
+++ b/src/tests/test_nbi_processor.py
@@ -0,0 +1,222 @@
+import pytest
+from unittest.mock import patch
+from src.nbi_processor.detect_format import detect_format
+from src.nbi_processor.main import nbi_processor
+from src.nbi_processor.translator import translator
+
+
+# ---------- Tests detect_format ----------
+
+def test_detect_format_ietf():
+    data = {"ietf-network-slice-service:network-slice-services": {}}
+    assert detect_format(data) == "IETF"
+
+def test_detect_format_3gpp_variants():
+    assert detect_format({"RANSliceSubnet1": {}}) == "3GPP"
+    assert detect_format({"NetworkSlice1": {}}) == "3GPP"
+    assert detect_format({"TopSliceSubnet1": {}}) == "3GPP"
+    assert detect_format({"CNSliceSubnet1": {}}) == "3GPP"
+
+def test_detect_format_none():
+    assert detect_format({"foo": "bar"}) is None
+
+
+# ---------- Fixtures ----------
+
+@pytest.fixture
+def ietf_intent():
+    return {"ietf-network-slice-service:network-slice-services": {"foo": "bar"}}
+
+@pytest.fixture
+def gpp_intent():
+    # Estructura mínima consistente con translator
+    return {
+        "RANSliceSubnet1": {
+            "networkSliceSubnetRef": ["subnetA", "subnetB"]
+        },
+        "subnetA": {
+            "EpTransport": ["EpTransport ep1", "EpTransport ep2"],
+            "SliceProfileList": [{
+                "RANSliceSubnetProfile": {
+                    "dLThptPerSliceSubnet": {
+                        "GuaThpt": 1,
+                        "MaxThpt": 2
+                    },
+                    "uLThptPerSliceSubnet": {
+                        "GuaThpt": 1,
+                        "MaxThpt": 2
+                    },
+                    "dLLatency": 20,
+                    "uLLatency": 20
+                }
+            }],
+        },
+        "subnetB": {
+            "EpTransport": ["EpTransport ep3", "EpTransport ep4"],
+        },
+        "EpTransport ep1": {
+            "qosProfile": "qosA",
+            "EpApplicationRef": ["EP_N2 epRef1"],
+            "logicalInterfaceInfo": {"logicalInterfaceType": "typeA", "logicalInterfaceId": "idA"},
+            "IpAddress": "1.1.1.1",
+            "NextHopInfo": "NH1",
+        },
+        "EpTransport ep2": {
+            "qosProfile": "qosB",
+            "EpApplicationRef": ["EP_N2 epRef2"],
+            "logicalInterfaceInfo": {"logicalInterfaceType": "typeB", "logicalInterfaceId": "idB"},
+            "IpAddress": "2.2.2.2",
+            "NextHopInfo": "NH2",
+        },
+        "EP_N2 epRef1": {"localAddress": "10.0.0.1", "remoteAddress": "11.1.1.1", "epTransportRef": "ep1"},
+        "EP_N2 epRef2": {"localAddress": "10.0.0.2", "remoteAddress": "11.1.1.2", "epTransportRef": "ep2"},
+        "EpTransport ep3": {"qosProfile": "qosC", "EpApplicationRef": ["EP_N2 epRef3"], "logicalInterfaceInfo": {"logicalInterfaceType": "typeC", "logicalInterfaceId": "idC"}, "IpAddress": "3.3.3.3", "NextHopInfo": "NH3"},
+        "EpTransport ep4": {"qosProfile": "qosD", "EpApplicationRef": ["EP_N2 epRef4"], "logicalInterfaceInfo": {"logicalInterfaceType": "typeD", "logicalInterfaceId": "idD"}, "IpAddress": "4.4.4.4", "NextHopInfo": "NH4"},
+        "EP_N2 epRef3": {"localAddress": "10.0.0.3", "remoteAddress": "11.1.1.3", "epTransportRef": "ep3"},
+        "EP_N2 epRef4": {"localAddress": "10.0.0.4", "remoteAddress": "11.1.1.4", "epTransportRef": "ep4"},
+    }
+
+
+@pytest.fixture
+def fake_template():
+    # Plantilla mínima para que el traductor funcione
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {"id": "", "slo-policy": {"metric-bound": []}}
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "",
+                    "description": "",
+                    "slo-sle-policy": {},
+                    "sdps": {"sdp": [
+                        {"service-match-criteria": {"match-criterion": [{}]}, "attachment-circuits": {"attachment-circuit": [{"sdp-peering": {}}]}},
+                        {"service-match-criteria": {"match-criterion": [{}]}, "attachment-circuits": {"attachment-circuit": [{"sdp-peering": {}}]}}
+                    ]},
+                    "connection-groups": {"connection-group": [{}]},
+                }
+            ],
+        }
+    }
+
+
+# ---------- Tests nbi_processor ----------
+
+def test_nbi_processor_ietf(ietf_intent):
+    result = nbi_processor(ietf_intent)
+    assert isinstance(result, list)
+    assert result[0] == ietf_intent
+
+@patch("src.nbi_processor.main.translator")
+def test_nbi_processor_3gpp(mock_translator, gpp_intent):
+    mock_translator.return_value = {"ietf-network-slice-service:network-slice-services": {}}
+    result = nbi_processor(gpp_intent)
+    assert isinstance(result, list)
+    assert len(result) == 2  # Dos subnets procesados
+    assert all("ietf-network-slice-service:network-slice-services" in r for r in result)
+
+def test_nbi_processor_unrecognized():
+    with pytest.raises(ValueError):
+        nbi_processor({"foo": "bar"})
+
+def test_nbi_processor_empty():
+    with pytest.raises(ValueError):
+        nbi_processor({})
+
+
+# ---------- Tests translator ----------
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_basic(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    result = translator(gpp_intent, "subnetA")
+
+    assert isinstance(result, dict)
+    assert "ietf-network-slice-service:network-slice-services" in result
+
+    slice_service = result["ietf-network-slice-service:network-slice-services"]["slice-service"][0]
+    assert slice_service["id"].startswith("slice-service-")
+    assert "description" in slice_service
+    assert slice_service["slo-sle-policy"]["slo-sle-template"] == "qosA"  # viene del ep1
+
+import re
+import uuid
+
+
+# ---------- Extra detect_format ----------
+
+@pytest.mark.parametrize("data", [
+    None,
+    [],
+    "",
+    123,
+])
+def test_detect_format_invalid_types(data):
+    assert detect_format(data if isinstance(data, dict) else {}) in (None, "IETF", "3GPP")
+
+
+def test_detect_format_multiple_keys():
+    # Si tiene IETF y 3GPP, debe priorizar IETF
+    data = {
+        "ietf-network-slice-service:network-slice-services": {},
+        "RANSliceSubnet1": {}
+    }
+    assert detect_format(data) == "IETF"
+
+
+# ---------- Extra nbi_processor ----------
+
+def test_nbi_processor_gpp_missing_refs(gpp_intent):
+    # Quitar networkSliceSubnetRef debería provocar ValueError en translator loop
+    broken = gpp_intent.copy()
+    broken["RANSliceSubnet1"] = {}  # no tiene "networkSliceSubnetRef"
+    with pytest.raises(KeyError):
+        nbi_processor(broken)
+
+
+# ---------- Extra translator ----------
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_maps_metrics(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    result = translator(gpp_intent, "subnetA")
+
+    metrics = result["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
+    metric_types = {m["metric-type"] for m in metrics}
+    assert "one-way-delay-maximum" in metric_types
+    assert "one-way-bandwidth" in metric_types
+
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_empty_profile(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    gpp_intent["subnetA"]["SliceProfileList"] = [{}]  # vacío
+    result = translator(gpp_intent, "subnetA")
+    metrics = result["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
+    assert metrics == []  # no debería añadir nada
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_sdps_are_populated(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    result = translator(gpp_intent, "subnetA")
+    slice_service = result["ietf-network-slice-service:network-slice-services"]["slice-service"][0]
+
+    sdp0 = slice_service["sdps"]["sdp"][0]
+    assert sdp0["node-id"] == "ep1"
+    assert re.match(r"^\d+\.\d+\.\d+\.\d+$", sdp0["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"])
+    assert "target-connection-group-id" in sdp0["service-match-criteria"]["match-criterion"][0]
+
+    sdp1 = slice_service["sdps"]["sdp"][1]
+    assert sdp1["node-id"] == "ep2"
+    assert sdp1["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"].startswith("NH")
+
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_with_single_endpoint_should_fail(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    gpp_intent["subnetA"]["EpTransport"] = ["EpTransport ep1"]  # solo uno
+    with pytest.raises(IndexError):
+        translator(gpp_intent, "subnetA")
diff --git a/src/tests/test_utils.py b/src/tests/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b887a497180777fb9fdd86ca5f04d57c5b4c77a
--- /dev/null
+++ b/src/tests/test_utils.py
@@ -0,0 +1,182 @@
+import json
+import pytest
+import os
+
+from src.utils.load_template import load_template
+from src.utils.dump_templates import dump_templates
+from src.utils.send_response import send_response
+from src.utils.build_response import build_response
+from flask import Flask
+
+@pytest.fixture
+def tmp_json_file(tmp_path):
+    """Crea un archivo JSON temporal válido y devuelve su ruta y contenido."""
+    data = {"name": "test"}
+    file_path = tmp_path / "template.json"
+    file_path.write_text(json.dumps(data))
+    return file_path, data
+
+
+def test_load_template_ok(tmp_json_file):
+    """Debe cargar correctamente un JSON válido."""
+    file_path, expected = tmp_json_file
+    result = load_template(str(file_path))
+    assert result == expected
+
+
+def test_load_template_invalid(tmp_path):
+    """Debe devolver un response con error si el JSON es inválido."""
+    bad_file = tmp_path / "bad.json"
+    bad_file.write_text("{invalid json}")
+
+    result, code = load_template(str(bad_file))
+    assert code == 500
+    assert result["success"] is False
+    assert "Template loading error" in result["error"]
+
+def test_dump_templates_enabled(monkeypatch, tmp_path):
+    """Debe volcar múltiples JSON correctamente en src/templates si DUMP_TEMPLATES está activado."""
+    templates_dir = tmp_path / "src" / "templates"
+    templates_dir.mkdir(parents=True)
+
+    monkeypatch.setattr("src.utils.dump_templates.TEMPLATES_PATH", str(templates_dir))
+
+    app = Flask(__name__)
+    app.config["DUMP_TEMPLATES"] = True
+
+    with app.app_context():
+        nbi = {"nbi": 1}
+        ietf = {"ietf": 2}
+        realizer = {"realizer": 3}
+
+        dump_templates(nbi, ietf, realizer)
+
+    for name, data in [("nbi_template.json", nbi), ("ietf_template.json", ietf), ("realizer_template.json", realizer)]:
+        file_path = templates_dir / name
+        assert file_path.exists()
+        assert json.loads(file_path.read_text()) == data
+
+def test_dump_templates_disabled(monkeypatch, tmp_path):
+    """No debe escribir nada en src/templates si DUMP_TEMPLATES está desactivado."""
+    templates_dir = tmp_path / "src" / "templates"
+    templates_dir.mkdir(parents=True)
+
+    monkeypatch.setattr("src.utils.dump_templates.TEMPLATES_PATH", str(templates_dir))
+
+    app = Flask(__name__)
+    app.config["DUMP_TEMPLATES"] = False
+
+    with app.app_context():
+        dump_templates({"nbi": 1}, {"ietf": 2}, {"realizer": 3})
+
+    for name in ["nbi_template.json", "ietf_template.json", "realizer_template.json"]:
+        assert not (templates_dir / name).exists()
+
+def test_send_response_success():
+    """Debe devolver success=True y code=200 si el resultado es True."""
+    resp, code = send_response(True, data={"k": "v"})
+    assert code == 200
+    assert resp["success"] is True
+    assert resp["data"]["k"] == "v"
+    assert resp["error"] is None
+
+
+def test_send_response_error():
+    """Debe devolver success=False y code=400 si el resultado es False."""
+    resp, code = send_response(False, message="fallo")
+    assert code == 400
+    assert resp["success"] is False
+    assert resp["data"] is None
+    assert "fallo" in resp["error"]
+
+def ietf_intent():
+    """Intento válido en formato IETF simplificado."""
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {
+                        "id": "qos1",
+                        "slo-policy": {
+                            "metric-bound": [
+                                {
+                                    "metric-type": "one-way-bandwidth",
+                                    "metric-unit": "kbps",
+                                    "bound": 1000
+                                }
+                            ],
+                            "availability": 99.9,
+                            "mtu": 1500
+                        }
+                    }
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "slice-test-1",
+                    "sdps": {
+                        "sdp": [
+                            {
+                                "id": "CU",
+                                "sdp-ip-address": "10.0.0.1",
+                                "service-match-criteria": {
+                                    "match-criterion": [{"match-type": "vlan", "value": "100"}]
+                                },
+                            },
+                            {
+                                "id": "DU",
+                                "sdp-ip-address": "10.0.0.2",
+                                "service-match-criteria": {
+                                    "match-criterion": [{"match-type": "vlan", "value": "100"}]
+                                },
+                            },
+                        ]
+                    },
+                }
+            ],
+        }
+    }
+
+
+def test_build_response_ok():
+    """Debe construir correctamente el response a partir de un intent IETF válido."""
+    intent = ietf_intent()
+    response = []
+    result = build_response(intent, response)
+
+    assert isinstance(result, list)
+    assert len(result) == 1
+
+    slice_data = result[0]
+    assert slice_data["id"] == "slice-test-1"
+    assert slice_data["source"] == "CU"
+    assert slice_data["destination"] == "DU"
+    assert slice_data["vlan"] == "100"
+
+    # Validar constraints
+    requirements = slice_data["requirements"]
+    assert any(r["constraint_type"] == "one-way-bandwidth[kbps]" and r["constraint_value"] == "1000" for r in requirements)
+    assert any(r["constraint_type"] == "availability[%]" and r["constraint_value"] == "99.9" for r in requirements)
+    assert any(r["constraint_type"] == "mtu[bytes]" and r["constraint_value"] == "1500" for r in requirements)
+
+
+def test_build_response_empty_policy():
+    """Debe devolver lista sin constraints si slo-policy está vacío."""
+    intent = ietf_intent()
+    intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"] = {}
+    response = []
+    result = build_response(intent, response)
+
+    assert isinstance(result, list)
+    assert len(result[0]["requirements"]) == 0
+
+
+def test_build_response_invalid_intent():
+    """Debe fallar limpiamente si el intent no tiene la estructura esperada."""
+    bad_intent = {}
+    response = []
+    try:
+        result = build_response(bad_intent, response)
+    except Exception:
+        result = []
+    assert result == []
diff --git a/src/utils/build_response.py b/src/utils/build_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7ddba0be77fa9fd103e9bc1a12b82fd7b62cd88
--- /dev/null
+++ b/src/utils/build_response.py
@@ -0,0 +1,90 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from .safe_get import safe_get
+
+def build_response(intent, response, controller_type = None):
+    """
+    Build a structured response from network slice intent.
+    
+    Extracts key information from an IETF network slice intent and formats it
+    into a standardized response structure with slice details and QoS requirements.
+
+    Args:
+        intent (dict): IETF network slice service intent containing:
+            - slice-service: Service configuration with SDPs and IDs
+            - slo-sle-templates: QoS policy templates
+        response (list): Existing response list to append to
+        controller_type (str, optional): Type of controller managing the slice.
+                                        Defaults to None
+
+    Returns:
+        list: Updated response list with appended slice information containing:
+            - id: Slice service identifier
+            - source: Source service delivery point ID
+            - destination: Destination service delivery point ID
+            - vlan: VLAN identifier from match criteria
+            - requirements: List of QoS constraint dictionaries with:
+                * constraint_type: Metric type and unit (e.g., "latency[ms]")
+                * constraint_value: Bound value as string
+                
+    Notes:
+        - Extracts metric bounds from SLO policy (bandwidth, delay, jitter, etc.)
+        - Includes availability and MTU if specified in SLO policy
+        - Assumes point-to-point topology with exactly 2 SDPs
+        - VLAN extracted from first SDP's first match criterion
+    """
+    
+    id = safe_get(intent, ["ietf-network-slice-service:network-slice-services","slice-service",0,"id"])
+    source = safe_get(intent, ["ietf-network-slice-service:network-slice-services","slice-service",0,"sdps","sdp",0,"id"])
+    destination = safe_get(intent, ["ietf-network-slice-service:network-slice-services","slice-service",0,"sdps","sdp",1,"id"])
+    vlan = safe_get(intent, ["ietf-network-slice-service:network-slice-services","slice-service",0,"sdps","sdp",0,"service-match-criteria","match-criterion",0,"value"])
+
+    qos_requirements = []
+
+    # Populate response with QoS requirements and VLAN from intent
+    slo_policy = safe_get(intent, ["ietf-network-slice-service:network-slice-services","slo-sle-templates","slo-sle-template",0,"slo-policy"])
+
+    # Process metrics
+    for metric in slo_policy.get("metric-bound", []):
+        constraint_type = f"{metric['metric-type']}[{metric['metric-unit']}]"
+        constraint_value = str(metric["bound"])
+        qos_requirements.append({
+            "constraint_type": constraint_type,
+            "constraint_value": constraint_value
+        })
+
+    # Availability
+    if "availability" in slo_policy:
+        qos_requirements.append({
+            "constraint_type": "availability[%]",
+            "constraint_value": str(slo_policy["availability"])
+        })
+
+    # MTU
+    if "mtu" in slo_policy:
+        qos_requirements.append({
+            "constraint_type": "mtu[bytes]",
+            "constraint_value": str(slo_policy["mtu"])
+        })
+    response.append({
+        "id": id,
+        "source": source,
+        "destination": destination,
+        "vlan": vlan,
+        "requirements": qos_requirements,
+    })
+    return response
\ No newline at end of file
diff --git a/src/utils/dump_templates.py b/src/utils/dump_templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3fbb444792bef2ed3e6eaa0002a569619951cba
--- /dev/null
+++ b/src/utils/dump_templates.py
@@ -0,0 +1,64 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import json, os
+from src.config.constants import TEMPLATES_PATH
+from flask import current_app
+
+def dump_templates(nbi_file, ietf_file, realizer_file):
+    """
+    Dump multiple template files as JSON for debugging and analysis.
+    
+    This utility function saves network slice templates at different processing
+    stages to disk for inspection, debugging, and documentation purposes.
+    Only executes if DUMP_TEMPLATES configuration flag is enabled.
+
+    Args:
+        nbi_file (dict): Northbound Interface template - original user/API request
+        ietf_file (dict): IETF-standardized network slice intent format
+        realizer_file (dict): Controller-specific realization template
+        
+    Returns:
+        None
+        
+    Notes:
+        - Controlled by DUMP_TEMPLATES configuration flag
+        - Files saved to TEMPLATES_PATH directory
+        - Output files:
+          * nbi_template.json - Original NBI request
+          * ietf_template.json - Standardized IETF format
+          * realizer_template.json - Controller-specific format
+        - JSON formatted with 2-space indentation for readability
+        - Silently returns if DUMP_TEMPLATES is False
+        
+    Raises:
+        IOError: If unable to write to TEMPLATES_PATH directory
+    """
+    if not current_app.config["DUMP_TEMPLATES"]:
+        return
+
+    # Map template content to output filenames
+    templates = {
+        "nbi_template.json": nbi_file,
+        "ietf_template.json": ietf_file,
+        "realizer_template.json": realizer_file,
+    }
+
+    # Write each template to disk
+    for filename, content in templates.items():
+        path = os.path.join(TEMPLATES_PATH, filename)
+        with open(path, "w") as f:
+            json.dump(content, f, indent=2)
\ No newline at end of file
diff --git a/src/utils/load_template.py b/src/utils/load_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd2cd8bb44cccce0f0a216665f5a8251a3ba504c
--- /dev/null
+++ b/src/utils/load_template.py
@@ -0,0 +1,42 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, json
+from .send_response import send_response
+
+def load_template(dir_t):
+    """
+    Load and process JSON templates for different network slice formats.
+
+    Args:
+        dir_t (str): Path to the template file
+
+    Returns:
+        dict: Parsed JSON template
+    """
+    try:
+        with open(dir_t, "r") as source:
+            template = json.loads(
+                source.read()
+                .replace("\t", "")
+                .replace("\n", "")
+                .replace("'", '"')
+                .strip()
+            )
+        return template
+    except Exception as e:
+        logging.error(f"Template loading error: {e}")
+        return send_response(False, code=500, message=f"Template loading error: {e}")
\ No newline at end of file
diff --git a/src/utils/safe_get.py b/src/utils/safe_get.py
new file mode 100644
index 0000000000000000000000000000000000000000..c02b1bfb5d20a3be562d2e2d9faea3f6a4308dae
--- /dev/null
+++ b/src/utils/safe_get.py
@@ -0,0 +1,33 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+def safe_get(dct, keys):
+    """
+    Safely retrieves a nested value from a dictionary or list.
+    Args:
+        dct (dict or list): The dictionary or list to traverse.
+        keys (list): A list of keys (for dicts) or indices (for lists) to follow.
+    Returns:
+        The value found at the nested location, or None if any key/index is not found.
+    """
+    for key in keys:
+        if isinstance(dct, dict) and key in dct:
+            dct = dct[key]
+        elif isinstance(dct, list) and isinstance(key, int) and key < len(dct):
+            dct = dct[key]
+        else:
+            return None
+    return dct
diff --git a/src/utils/send_response.py b/src/utils/send_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..30c4433fa4eb8933962838584b81c43a2968bda6
--- /dev/null
+++ b/src/utils/send_response.py
@@ -0,0 +1,54 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, inspect
+
+def send_response(result, message=None, code=None, data=None):
+    """
+    Generate and send a standardized API response.
+
+    Args:
+        result (bool): Indicates success (True) or failure (False).
+        message (str, optional): Message (success or error). Defaults to None.
+        code (int, optional): HTTP code (default 200 for success, 400 for error). Defaults to None.
+        data (dict, optional): Additional payload. Defaults to None.
+
+    Returns:
+        tuple: (response_dict, http_status_code)
+    """
+    
+    frame = inspect.currentframe().f_back
+    filename = frame.f_code.co_filename
+    lineno = frame.f_lineno
+
+    if result:
+        code = code or 200
+        response = {
+            "success": True,
+            "data": data or {},
+            "error": None,
+        }
+    else:
+        code = code or 400
+        error_info = f"{message or 'An error occurred while processing the request.'} (File: {filename}, Line: {lineno})"
+        logging.warning(f"Request failed. Reason: {message}")
+        response = {
+            "success": False,
+            "data": None,
+            "error": error_info,
+        }
+
+    return response, code
\ No newline at end of file
diff --git a/src/webui/gui.py b/src/webui/gui.py
index 1afb831c6410b8a58d51c1b077a86c4a665db33d..49d8712e9fcfb2f368efd9d302a2e70655811015 100644
--- a/src/webui/gui.py
+++ b/src/webui/gui.py
@@ -18,12 +18,11 @@ import json, logging, uuid
 import requests
 import os
 import pandas as pd
-from flask import Flask, render_template, request, jsonify, redirect, url_for, session, Blueprint
-from collections import OrderedDict
-from src.Constants import SRC_PATH, NSC_PORT, TEMPLATES_PATH, DUMMY_MODE
-from src.realizers.ixia.NEII_V4 import NEII_controller
+from flask import render_template, request, jsonify, redirect, url_for, session, Blueprint
+from src.config.constants import SRC_PATH, NSC_PORT, TEMPLATES_PATH
+from src.realizer.ixia.helpers.NEII_V4 import NEII_controller
+from flask import current_app
 
-# app =Flask(__name__)
 gui_bp = Blueprint('gui', __name__, template_folder=os.path.join(SRC_PATH, 'webui', 'templates'), static_folder=os.path.join(SRC_PATH, 'webui', 'static'), static_url_path='/webui/static')
 
 #Variables for dev accessing
@@ -32,6 +31,15 @@ PASSWORD = 'admin'
 enter=False
 
 def __safe_int(value):
+    """
+    Safely convert a string or numeric input to int or float.
+    
+    Args:
+        value (str|int|float): The input value to convert.
+    
+    Returns:
+        int|float|None: The converted integer or float value, or None if conversion fails.
+    """
     try:
         if isinstance(value, str):
             value = value.strip().replace(',', '.')
@@ -41,9 +49,13 @@ def __safe_int(value):
         return None
 
 def __build_request_ietf(src_node_ip=None, dst_node_ip=None, vlan_id=None, bandwidth=None, latency=None, tolerance=0, latency_version=None, reliability=None):
-    '''
-    Work: Build the IETF template for the intent
-    '''
+    """
+    Build an IETF-compliant network slice request form from inputs.
+    
+    Args: IPs, VLAN, bandwidth, latency, reliability, etc.
+
+    Returns: dict representing the JSON request.
+    """
     # Open and read the template file
     with open(os.path.join(TEMPLATES_PATH, 'ietf_template_empty.json'), 'r') as source:
         # Clean up the JSON template
@@ -109,9 +121,11 @@ def __build_request(ip_version=None, src_node_ip=None, dst_node_ip=None, src_nod
                     reliability=None, packet_reorder=None, num_pack=None, pack_reorder=None, num_reorder=None,
                     max_reorder=None, desv_reorder=None, drop_version=None, packets_drop=None,
                     drops=None, desv_drop=None):
-    '''
-    Work: Build the template for the IXIA NEII
-    '''
+    """
+    Build a JSON request form from inputs.
+    Args: IPs, VLAN, bandwidth, latency, reliability, etc.
+    Returns: dict representing the JSON request.
+    """
     json_data = {
         "ip_version": ip_version,
         "src_node_ip": src_node_ip,
@@ -138,10 +152,14 @@ def __build_request(ip_version=None, src_node_ip=None, dst_node_ip=None, src_nod
     return json_data
 
 def __datos_json():
+    """
+    Read slice data from JSON file and return as a pandas DataFrame.
+    Returns:
+        pd.DataFrame: DataFrame containing slice data.
+    """
     try:
         with open(os.path.join(SRC_PATH, 'slice_ddbb.json'), 'r') as fichero:
             datos =json.load(fichero)
-            print(datos)
             rows =[]
             for source_ip, source_info in datos["source"].items():
                 vlan = source_info["vlan"]
@@ -165,10 +183,8 @@ def home():
     session['enter'] = False
     # Leer las IPs actuales del archivo de configuración
     try:
-        with open(os.path.join(SRC_PATH, 'IPs.json')) as f:
-            ips = json.load(f)
-            tfs_ip = ips.get('TFS_IP', 'No configurada')
-            ixia_ip = ips.get('IXIA_IP', 'No configurada')
+        tfs_ip = current_app.config["TFS_IP"]
+        ixia_ip = current_app.config["IXIA_IP"]
     except Exception:
         tfs_ip = 'No configurada'
         ixia_ip = 'No configurada'
@@ -292,7 +308,7 @@ def develop():
 
         json_data = __build_request(ip_version=ip_version, src_node_ip=src_node_ipv4, dst_node_ip=dst_node_ipv4, src_node_ipv6=src_node_ipv6, dst_node_ipv6=dst_node_ipv6, vlan_id=vlan_id, latency=latency, bandwidth=bandwidth, latency_version=latency_version, tolerance=tolerance, packet_reorder=packet_reorder, num_pack=num_pack, pack_reorder=pack_reorder, num_reorder=num_reorder, max_reorder=max_reorder, desv_reorder=desv_reorder, drop_version=drop_version, packets_drop=packets_drop, drops=drops, desv_drop=desv_drop)
         logging.debug("Generated JSON data: %s", json_data)
-        if not DUMMY_MODE:
+        if not current_app.config["DUMMY_MODE"]:
             NEII_controller().nscNEII(json_data)
 
         session['enter'] = True
@@ -350,14 +366,14 @@ def search():
         response.raise_for_status()
         ixia_slices = response.json()
 
-        # Combinar los slices de TFS e IXIA
+        # Combine slices from both controllers
         slices = tfs_slices + ixia_slices
        
     except requests.RequestException as e:
         logging.error("Error fetching slices: %s", e)
         return render_template('search.html', error="No se pudieron obtener los slices.", dataframe_html="")
 
-    # Extraer datos relevantes y construir un DataFrame
+    # Extract relevant data for DataFrame
     rows = []
     for item in slices:
         try:
@@ -370,7 +386,7 @@ def search():
             vlan = sdp[0]["service-match-criteria"]["match-criterion"][0]["value"]
             controller = item["controller"]
 
-            # Construir atributos dinámicamente
+            # Build attributes list
             attributes = []
             for metric in metric_bound:
                 if metric.get("metric-type", "") == "one-way-bandwidth":
@@ -454,21 +470,25 @@ def update_ips():
     tfs_ip = data.get('tfs_ip')
     ixia_ip = data.get('ixia_ip')
 
-    # Cargar datos existentes si el archivo existe
+    # Load existing IPs from the configuration file
     config_path = os.path.join(SRC_PATH, 'IPs.json')
     if os.path.exists(config_path):
         with open(config_path) as f:
             ips = json.load(f)
+        ips = {
+            "TFS_IP": current_app.config["TFS_IP"],
+            "IXIA_IP": current_app.config["IXIA_IP"]
+        }
     else:
         ips = {"TFS_IP": "", "IXIA_IP": ""}
 
-    # Actualizar solo los campos recibidos
+    # Update IPs if provided
     if tfs_ip:
         ips['TFS_IP'] = tfs_ip
     if ixia_ip:
         ips['IXIA_IP'] = ixia_ip
 
-    # Guardar de nuevo el archivo con los valores actualizados
+    # Save updated IPs back to the file
     with open(config_path, 'w') as f:
         json.dump(ips, f, indent=4)
 
diff --git a/swagger/E2E_namespace.py b/swagger/E2E_namespace.py
new file mode 100644
index 0000000000000000000000000000000000000000..a53cd0de84a58344d9a5401a16bd04739179a6cf
--- /dev/null
+++ b/swagger/E2E_namespace.py
@@ -0,0 +1,153 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (E2E) (https://E2E.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from flask import request
+from flask_restx import Namespace, Resource, reqparse
+from src.main import NSController
+from src.api.main import Api
+import json
+from swagger.models.create_models import create_gpp_nrm_28541_model, create_ietf_network_slice_nbi_yang_model
+
+e2e_ns = Namespace(
+    "E2E",
+    description="Operations related to transport network slices with E2E Orchestrator"
+)
+
+
+# 3GPP NRM TS28.541 Data models
+gpp_network_slice_request_model = create_gpp_nrm_28541_model(e2e_ns)
+
+# IETF draft-ietf-teas-ietf-network-slice-nbi-yang Data models
+
+slice_ddbb_model, slice_response_model = create_ietf_network_slice_nbi_yang_model(e2e_ns)
+
+upload_parser = reqparse.RequestParser()
+upload_parser.add_argument('file', location='files', type='FileStorage', help="File to upload")
+upload_parser.add_argument('json_data', location='form', help="JSON Data in string format")
+
+# Namespace Controllers
+@e2e_ns.route("/slice")
+class E2ESliceList(Resource):
+    @e2e_ns.doc(summary="Return all transport network slices", description="Returns all transport network slices from the slice controller.")
+    @e2e_ns.response(200, "Slices returned", slice_ddbb_model)
+    @e2e_ns.response(404, "Transport network slices not found")
+    @e2e_ns.response(500, "Internal server error")
+    def get(self):
+        """Retrieve all slices"""
+        controller = NSController(controller_type="E2E")
+        data, code = Api(controller).get_flows()
+        return data, code
+    
+    @e2e_ns.doc(summary="Submit a transport network slice request", description="This endpoint allows clients to submit transport network slice requests using a JSON payload.")
+    @e2e_ns.response(201,"Slice created successfully", slice_response_model)
+    @e2e_ns.response(200, "No service to process.")
+    @e2e_ns.response(400, "Invalid request format")
+    @e2e_ns.response(500, "Internal server error")
+    @e2e_ns.expect(upload_parser)
+    def post(self):
+        """Submit a new slice request with a file"""
+
+        json_data = None
+
+        # Try to get the JSON data from the uploaded file
+        uploaded_file = request.files.get('file')
+        if uploaded_file:
+            if not uploaded_file.filename.endswith('.json'):
+                return {
+                    "success": False,
+                    "data": None,
+                    "error": "Only JSON files allowed"
+                }, 400
+            
+            try:
+                json_data = json.load(uploaded_file)  # Convert file to JSON
+            except json.JSONDecodeError:
+                return {
+                    "success": False,
+                    "data": None,
+                    "error": "JSON file not valid"
+                }, 400
+
+        # If no file was uploaded, try to get the JSON data from the form
+        if json_data is None:
+            raw_json = request.form.get('json_data')
+            if raw_json:
+                try:
+                    json_data = json.loads(raw_json)  # Convert string to JSON
+                except json.JSONDecodeError:
+                    return {
+                        "success": False,
+                        "data": None,
+                        "error": "JSON file not valid"
+                    }, 400
+        
+        # If no JSON data was found, return an error
+        if json_data is None:
+            return {
+                    "success": False,
+                    "data": None,
+                    "error": "No data sent"
+                }, 400
+
+        # Process the JSON data with the NSController
+        controller = NSController(controller_type="E2E")
+        data, code = Api(controller).add_flow(json_data)
+        return data, code
+    
+    @e2e_ns.doc(summary="Delete all transport network slices", description="Deletes all transport network slices from the slice controller.")
+    @e2e_ns.response(204, "All transport network slices deleted successfully.")
+    @e2e_ns.response(500, "Internal server error")
+    def delete(self):
+        """Delete all slices"""
+        controller = NSController(controller_type="E2E")
+        data, code = Api(controller).delete_flows()
+        return data, code
+
+
+@e2e_ns.route("/slice/")
+@e2e_ns.doc(params={"slice_id": "The ID of the slice to retrieve or modify"})
+class E2ESlice(Resource):
+    @e2e_ns.doc(summary="Return a specific transport network slice", description="Returns specific information related to a slice by providing its id")
+    @e2e_ns.response(200, "Slice returned", slice_ddbb_model)
+    @e2e_ns.response(404, "Transport network slice not found.")
+    @e2e_ns.response(500, "Internal server error")
+    def get(self, slice_id):
+        """Retrieve a specific slice"""
+        controller = NSController(controller_type="E2E")
+        data, code = Api(controller).get_flows(slice_id)
+        return data, code
+
+    @e2e_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
+    @e2e_ns.response(204, "Transport network slice deleted successfully.")
+    @e2e_ns.response(404, "Transport network slice not found.")
+    @e2e_ns.response(500, "Internal server error")
+    def delete(self, slice_id):
+        """Delete a slice"""
+        controller = NSController(controller_type="E2E")
+        data, code = Api(controller).delete_flows(slice_id)
+        return data, code
+
+    @e2e_ns.expect(slice_ddbb_model, validate=True)
+    @e2e_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
+    @e2e_ns.response(200, "Slice modified", slice_response_model)
+    @e2e_ns.response(404, "Transport network slice not found.")
+    @e2e_ns.response(500, "Internal server error")
+    def put(self, slice_id):
+        """Modify a slice"""
+        json_data = request.get_json()
+        controller = NSController(controller_type="E2E")
+        data, code = Api(controller).modify_flow(slice_id, json_data)
+        return data, code
diff --git a/swagger/ixia_namespace.py b/swagger/ixia_namespace.py
index 6a14ffe995ad0fb2228c2d8deaf6dd91060ea510..e8f4ac9c04f97c832f78828a48ec0a637b675475 100644
--- a/swagger/ixia_namespace.py
+++ b/swagger/ixia_namespace.py
@@ -1,6 +1,23 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
 from flask import request
-from flask_restx import Namespace, Resource, fields, reqparse
-from src.network_slice_controller import NSController
+from flask_restx import Namespace, Resource, reqparse
+from src.main import NSController
+from src.api.main import Api
 import json
 from swagger.models.create_models import create_gpp_nrm_28541_model, create_ietf_network_slice_nbi_yang_model
 
@@ -13,13 +30,13 @@ ixia_ns = Namespace(
 gpp_network_slice_request_model = create_gpp_nrm_28541_model(ixia_ns)
 
 # IETF draft-ietf-teas-ietf-network-slice-nbi-yang Data models
-
 slice_ddbb_model, slice_response_model = create_ietf_network_slice_nbi_yang_model(ixia_ns)
 
 upload_parser = reqparse.RequestParser()
 upload_parser.add_argument('file', location='files', type='FileStorage', help="Archivo a subir")
 upload_parser.add_argument('json_data', location='form', help="Datos JSON en formato string")
 
+
 # Namespace Controllers
 @ixia_ns.route("/slice")
 class IxiaSliceList(Resource):
@@ -30,53 +47,51 @@ class IxiaSliceList(Resource):
     def get(self):
         """Retrieve all slices"""
         controller = NSController(controller_type="IXIA")
-        return controller.get_flows()
+        data, code = Api(controller).get_flows()
+        return data, code
     
     @ixia_ns.doc(summary="Submit a transport network slice request", description="This endpoint allows clients to submit transport network slice requests using a JSON payload.")
-    @ixia_ns.response(200, "Slice request successfully processed", slice_response_model)
+    @ixia_ns.response(201, "Slice created successfully", slice_response_model)
+    @ixia_ns.response(200, "No service to process.")
     @ixia_ns.response(400, "Invalid request format")
     @ixia_ns.response(500, "Internal server error")
     @ixia_ns.expect(upload_parser)
     def post(self):
         """Submit a new slice request with a file"""
-
         json_data = None
 
-        # Try to get the JSON data from the uploaded file
         uploaded_file = request.files.get('file')
         if uploaded_file:
             if not uploaded_file.filename.endswith('.json'):
-                return {"error": "Only JSON files allowed"}, 400
-            
+                return {"success": False, "data": None, "error": "Only JSON files allowed"}, 400
             try:
-                json_data = json.load(uploaded_file)  # Convert file to JSON
+                json_data = json.load(uploaded_file)
             except json.JSONDecodeError:
-                return {"error": "JSON file not valid"}, 400
+                return {"success": False, "data": None, "error": "JSON file not valid"}, 400
 
-        # If no file was uploaded, try to get the JSON data from the form
         if json_data is None:
             raw_json = request.form.get('json_data')
             if raw_json:
                 try:
-                    json_data = json.loads(raw_json)  # Convert string to JSON
+                    json_data = json.loads(raw_json)
                 except json.JSONDecodeError:
-                    return {"error": "JSON file not valid"}, 400
-        
-        # If no JSON data was found, return an error
+                    return {"success": False, "data": None, "error": "JSON file not valid"}, 400
+
         if json_data is None:
-            return {"error": "No data sent"}, 400
+            return {"success": False, "data": None, "error": "No data sent"}, 400
 
-        # Process the JSON data with the NSController
         controller = NSController(controller_type="IXIA")
-        return controller.add_flow(json_data)
+        data, code = Api(controller).add_flow(json_data)
+        return data, code
     
     @ixia_ns.doc(summary="Delete all transport network slices", description="Deletes all transport network slices from the slice controller.")
-    @ixia_ns.response(200, "All transport network slices deleted successfully.")
+    @ixia_ns.response(204, "All transport network slices deleted successfully.")
     @ixia_ns.response(500, "Internal server error")
     def delete(self):
         """Delete all slices"""
         controller = NSController(controller_type="IXIA")
-        return controller.delete_flows()
+        data, code = Api(controller).delete_flows()
+        return data, code
 
 
 @ixia_ns.route("/slice/")
@@ -89,16 +104,18 @@ class IxiaSlice(Resource):
     def get(self, slice_id):
         """Retrieve a specific slice"""
         controller = NSController(controller_type="IXIA")
-        return controller.get_flows(slice_id)
+        data, code = Api(controller).get_flows(slice_id)
+        return data, code
 
     @ixia_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
-    @ixia_ns.response(200, "Transport network slice deleted successfully.")
+    @ixia_ns.response(204, "Transport network slice deleted successfully.")
     @ixia_ns.response(404, "Transport network slice not found.")
     @ixia_ns.response(500, "Internal server error")
     def delete(self, slice_id):
         """Delete a slice"""
         controller = NSController(controller_type="IXIA")
-        return controller.delete_flows(slice_id)
+        data, code = Api(controller).delete_flows(slice_id)
+        return data, code
 
     @ixia_ns.expect(slice_ddbb_model, validate=True)
     @ixia_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
@@ -109,4 +126,5 @@ class IxiaSlice(Resource):
         """Modify a slice"""
         json_data = request.get_json()
         controller = NSController(controller_type="IXIA")
-        return controller.modify_flow(slice_id, json_data)
\ No newline at end of file
+        data, code = Api(controller).modify_flow(slice_id, json_data)
+        return data, code
\ No newline at end of file
diff --git a/swagger/models/create_models.py b/swagger/models/create_models.py
index 94ca83bc53b978beb68512dd5959452375256f67..a1f7d3a94758b1ea3efe10a41271fead6b8ab6c7 100644
--- a/swagger/models/create_models.py
+++ b/swagger/models/create_models.py
@@ -293,34 +293,50 @@ def create_ietf_network_slice_nbi_yang_model(slice_ns):
 
     slice_ddbb_model = slice_ns.model('ddbb_model', {
         'slice_id': fields.String(),
-        'intent': fields.List(fields.Nested(ietf_network_slice_request_model))
+        'intent': fields.List(fields.Nested(ietf_network_slice_request_model)),
+        'controller': fields.String()
     })
 
 
     slice_response_model = slice_ns.model(
         "SliceResponse",
         {
-            "status": fields.String(description="Status of the request", example="success"),
-            "slices": fields.List(
-                fields.Nested(
-                    slice_ns.model(
-                        "SliceDetails",
-                        {
-                            "id": fields.String(description="Slice ID", example="CU-UP1_DU1"),
-                            "source": fields.String(description="Source IP", example="100.2.1.2"),
-                            "destination": fields.String(description="Destination IP", example="100.1.1.2"),
-                            "vlan": fields.String(description="VLAN ID", example="100"),
-                            "bandwidth(Mbps)": fields.Integer(
-                                description="Bandwidth in Mbps", example=120
-                            ),
-                            "latency(ms)": fields.Integer(
-                                description="Latency in milliseconds", example=4
+            "success": fields.Boolean(description="Indicates if the request was successful", example=True),
+            "data": fields.Nested(
+                slice_ns.model(
+                    "SliceData",
+                    {
+                        "slices": fields.List(
+                            fields.Nested(
+                                slice_ns.model(
+                                    "SliceDetails",
+                                    {
+                                        "id": fields.String(description="Slice ID", example="slice-service-11327140-7361-41b3-aa45-e84a7fb40be9"),
+                                        "source": fields.String(description="Source IP", example="10.60.11.3"),
+                                        "destination": fields.String(description="Destination IP", example="10.60.60.105"),
+                                        "vlan": fields.String(description="VLAN ID", example="100"),
+                                        "requirements": fields.List(
+                                            fields.Nested(
+                                                slice_ns.model(
+                                                    "SliceRequirement",
+                                                    {
+                                                        "constraint_type": fields.String(description="Type of constraint", example="one-way-bandwidth[kbps]"),
+                                                        "constraint_value": fields.String(description="Constraint value", example="2000")
+                                                    }
+                                                )
+                                            ),
+                                            description="List of requirements for the slice"
+                                        )
+                                    }
+                                )
                             ),
-                        },
-                    )
-                ),
-                description="List of slices",
+                            description="List of slices"
+                        ),
+                        "setup_time": fields.Float(description="Slice setup time in milliseconds", example=12.57),
+                    }
+                )
             ),
-        },
+            "error": fields.String(description="Error message if request failed", example=None)
+        }
     )
     return slice_ddbb_model, slice_response_model
\ No newline at end of file
diff --git a/swagger/tfs_namespace.py b/swagger/tfs_namespace.py
index c9c3e07f591d13390df92712a746843a8d2326bd..09163602aeca9bb14521b272bcfa980093a9c5f2 100644
--- a/swagger/tfs_namespace.py
+++ b/swagger/tfs_namespace.py
@@ -16,7 +16,8 @@
 
 from flask import request
 from flask_restx import Namespace, Resource, fields, reqparse
-from src.network_slice_controller import NSController
+from src.main import NSController
+from src.api.main import Api
 import json
 from swagger.models.create_models import create_gpp_nrm_28541_model, create_ietf_network_slice_nbi_yang_model
 
@@ -33,8 +34,8 @@ gpp_network_slice_request_model = create_gpp_nrm_28541_model(tfs_ns)
 slice_ddbb_model, slice_response_model = create_ietf_network_slice_nbi_yang_model(tfs_ns)
 
 upload_parser = reqparse.RequestParser()
-upload_parser.add_argument('file', location='files', type='FileStorage', help="Archivo a subir")
-upload_parser.add_argument('json_data', location='form', help="Datos JSON en formato string")
+upload_parser.add_argument('file', location='files', type='FileStorage', help="File to upload")
+upload_parser.add_argument('json_data', location='form', help="JSON Data in string format")
 
 # Namespace Controllers
 @tfs_ns.route("/slice")
@@ -46,10 +47,12 @@ class TfsSliceList(Resource):
     def get(self):
         """Retrieve all slices"""
         controller = NSController(controller_type="TFS")
-        return controller.get_flows()
+        data, code = Api(controller).get_flows()
+        return data, code
     
     @tfs_ns.doc(summary="Submit a transport network slice request", description="This endpoint allows clients to submit transport network slice requests using a JSON payload.")
-    @tfs_ns.response(200, "Slice request successfully processed", slice_response_model)
+    @tfs_ns.response(201,"Slice created successfully", slice_response_model)
+    @tfs_ns.response(200, "No service to process.")
     @tfs_ns.response(400, "Invalid request format")
     @tfs_ns.response(500, "Internal server error")
     @tfs_ns.expect(upload_parser)
@@ -62,12 +65,20 @@ class TfsSliceList(Resource):
         uploaded_file = request.files.get('file')
         if uploaded_file:
             if not uploaded_file.filename.endswith('.json'):
-                return {"error": "Only JSON files allowed"}, 400
+                return {
+                    "success": False,
+                    "data": None,
+                    "error": "Only JSON files allowed"
+                }, 400
             
             try:
                 json_data = json.load(uploaded_file)  # Convert file to JSON
             except json.JSONDecodeError:
-                return {"error": "JSON file not valid"}, 400
+                return {
+                    "success": False,
+                    "data": None,
+                    "error": "JSON file not valid"
+                }, 400
 
         # If no file was uploaded, try to get the JSON data from the form
         if json_data is None:
@@ -76,23 +87,33 @@ class TfsSliceList(Resource):
                 try:
                     json_data = json.loads(raw_json)  # Convert string to JSON
                 except json.JSONDecodeError:
-                    return {"error": "JSON file not valid"}, 400
+                    return {
+                        "success": False,
+                        "data": None,
+                        "error": "JSON file not valid"
+                    }, 400
         
         # If no JSON data was found, return an error
         if json_data is None:
-            return {"error": "No data sent"}, 400
+            return {
+                    "success": False,
+                    "data": None,
+                    "error": "No data sent"
+                }, 400
 
         # Process the JSON data with the NSController
         controller = NSController(controller_type="TFS")
-        return controller.add_flow(json_data)
+        data, code = Api(controller).add_flow(json_data)
+        return data, code
     
     @tfs_ns.doc(summary="Delete all transport network slices", description="Deletes all transport network slices from the slice controller.")
-    @tfs_ns.response(200, "All transport network slices deleted successfully.")
+    @tfs_ns.response(204, "All transport network slices deleted successfully.")
     @tfs_ns.response(500, "Internal server error")
     def delete(self):
         """Delete all slices"""
         controller = NSController(controller_type="TFS")
-        return controller.delete_flows()
+        data, code = Api(controller).delete_flows()
+        return data, code
 
 
 @tfs_ns.route("/slice/")
@@ -105,26 +126,29 @@ class TfsSlice(Resource):
     def get(self, slice_id):
         """Retrieve a specific slice"""
         controller = NSController(controller_type="TFS")
-        return controller.get_flows(slice_id)
+        data, code = Api(controller).get_flows(slice_id)
+        return data, code
 
     @tfs_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
-    @tfs_ns.response(200, "Transport network slice deleted successfully.")
+    @tfs_ns.response(204, "Transport network slice deleted successfully.")
     @tfs_ns.response(404, "Transport network slice not found.")
     @tfs_ns.response(500, "Internal server error")
     def delete(self, slice_id):
         """Delete a slice"""
         controller = NSController(controller_type="TFS")
-        return controller.delete_flows(slice_id)
+        data, code = Api(controller).delete_flows(slice_id)
+        return data, code
 
     @tfs_ns.expect(slice_ddbb_model, validate=True)
     @tfs_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
-    @tfs_ns.response(200, "Slice modified", slice_ddbb_model)
+    @tfs_ns.response(200, "Slice modified", slice_response_model)
     @tfs_ns.response(404, "Transport network slice not found.")
     @tfs_ns.response(500, "Internal server error")
     def put(self, slice_id):
         """Modify a slice"""
         json_data = request.get_json()
         controller = NSController(controller_type="TFS")
-        return controller.modify_flow(slice_id, json_data)
+        data, code = Api(controller).modify_flow(slice_id, json_data)
+        return data, code
 
+## API
+
+The API has three namespaces: tfs, ixia and e2e, one dedicated to each controller, with the operations POST, GET, PUT and DELETE
+- `GET /{namespace}/slice`: returns a list with all transport network slices currently available in the controller.
+- `POST /{namespace}/slice`: allows the submission of a new network slice request
+- `DELETE /{namespace}/slice`: deletes all transport network slices stored in the controller.
+- `GET /{namespace}/slice/{slice_id}`: retrieves detailed information about a specific transport network slice identified by its slice_id 
+- `DELETE /{namespace}/slice/{slice_id}`: deletes a specific transport network slice identified by its slice_id
+- `PUT /{namespace}/slice/{slice_id}`: modifies a specific transport network slice identified by its slice_id
+
+The API is available in the swagger documentation panel at `{ip}:{NSC_PORT}/nsc`
+
+## WebUI
+
+The WebUI is a graphical interface that allows operating the NSC. Currently, it has more limited operations than the API. It supports the creation of slices in both Teraflow and IXIA controllers, as well as getting information of the current slices. Modification and deletion are not yet supported.
+
+It is accessed at `{ip}:{NSC_PORT}/webui`
+
 ## Requirements
 - Python3.12
 - python3-pip
 - python3-venv
 
-## Configuration Constants
+## Configuration
 
-In the main configuration file, several constants can be adjusted to customize the Network Slice Controller (NSC) behavior:
+In the `src/config/.env.example` file, several constants can be adjusted to customize the Network Slice Controller (NSC) behaviour:
 
 ### Logging
 - `DEFAULT_LOGGING_LEVEL`: Sets logging verbosity
-  - Default: `logging.INFO`
-  - Options: `logging.DEBUG`, `logging.INFO`, `logging.WARNING`, `logging.ERROR`
-
-### Server
-- `NSC_PORT`: Server port
-  - Default: `8081`
-
-### Paths
-- `SRC_PATH`: Absolute path to source directory
-- `TEMPLATES_PATH`: Path to templates directory
+  - Default: `INFO`
+  - Options: `DEBUG`, `INFO`, `WARNING`, `ERROR`, `NOTSET`, `CRITICAL`
+
+### General
+- `DUMP_TEMPLATES`: Flag to deploy templates for debugging
+  - Default: `false`
+
+## Mapper
+- `NRP_ENABLED`: Flag to determine if the NSC performs NRPs
+  - Default: `false`
+- `PLANNER_ENABLED`: Flag to activate the planner
+  - Default: `false`
+- `PCE_EXTERNAL`: Flag to determine if external PCE is used
+  - Default: `false`
+- `PLANNER_TYPE`:  Type of planner to be used
+  - Default: `ENERGY`
+  - Options: `ENERGY`, `HRAT`, `TFS_OPTICAL`
+- `HRAT_IP`: HRAT planner IP
+  - Default: `10.0.0.1`
+- `OPTICAL_PLANNER_IP`: Optical planner IP
+  - Default: `10.0.0.1`
+
+## Realizer
+- `DUMMY_MODE`: If true, no config sent to controllers
+  - Default: `true`
 
 ### Teraflow Configuration
-- `TFS_UPLOAD`: Enable/disable uploading slice service to Teraflow
-  - Default: `False`
+- `UPLOAD_TYPE`: Configure type of upload to Teraflow
+  - Default: `WEBUI`
+  - Options: `WEBUI`, `NBI`
 - `TFS_IP`: Teraflow SDN controller IP
-  - Default: `"192.168.165.10"`
+  - Default: `"127.0.0.1"`
 - `TFS_L2VPN_SUPPORT`: Enable additional L2VPN configuration support
   - Default: `False`
 
-## Usage
+### Ixia Configuration
+- `IXIA_IP`: Ixia NEII IP
+  - Default: `"127.0.0.1"`
 
-To deploy and execute the NSC, follow these steps:
+### WebUI
+- `WEBUI_DEPLOY`: Flag to deploy WebUI
+  - Default: `False`
 
-0. **Preparation**
-    ```
-    git clone https://labs.etsi.org/rep/tfs/nsc.git
-    cd nsc
-    python3 -m venv venv
-    source venv/bin/activate
-    pip install -r requirements.txt
-    ```
+## Usage
+
+To use the NSC, just build the image and run it in a container following these steps:
 
-1. **Start NSC Server**:
+1. **Deploy**
     ```
-    python3 app.py
+    ./deploy.sh
     ```
 
-2. **Generate Slice Requests**:
+2. **Send Slice Requests**:
 
-    To send slice request, the NSC accepts POST request at the endpoint /slice. It is available in the swagger documentation panel at {ip}:{NSC_PORT}/nsc
+    Send slice requests via **API** (/nsc) or **WebUI** (/webui)
 
-
+- The branches `release/X.Y.Z` point to the code for the different release versions indicated in the branch name.
+  - Code in these branches can be considered stable, and no new features are planned.
+  - In case of bugs, point releases increasing revision number (Z) might be created.
 
+- The `develop` ([](https://labs.etsi.org/rep/tfs/nsc/-/commits/develop) [](https://labs.etsi.org/rep/tfs/nsc/-/commits/develop)) branch is the main development branch and contains the latest contributions.
+  - **Use it with care! It might not be stable.**
+  - The latest developments and contributions are added to this branch for testing and validation before reaching a release.
diff --git a/app.py b/app.py
index 61503b3b4600483708559bac25bee7cb4588a2a6..c7c0695591b935ba00b611ff9828d398118a2514 100644
--- a/app.py
+++ b/app.py
@@ -14,36 +14,51 @@
 
 # This file is an original contribution from Telefonica Innovación Digital S.L.
 
-import os
+import logging
 from flask import Flask
 from flask_restx import Api
 from flask_cors import CORS
 from swagger.tfs_namespace import tfs_ns
 from swagger.ixia_namespace import ixia_ns
-from src.Constants import NSC_PORT, WEBUI_DEPLOY
+from swagger.E2E_namespace import e2e_ns
+from src.config.constants import NSC_PORT
 from src.webui.gui import gui_bp
+from src.config.config import create_config
+from src.database.db import init_db
 
-app = Flask(__name__)
-CORS(app)
+def create_app():
+    """Create Flask application with configured API and namespaces."""
+    init_db()
+    app = Flask(__name__)
+    app = create_config(app)
+    CORS(app)
 
-# Create API instance
-api = Api(
-    app,
-    version="1.0",
-    title="Network Slice Controller (NSC) API",
-    description="API for orchestrating and realizing transport network slice requests",
-    doc="/nsc"  # Swagger UI URL
-)
+    # Configure logging to provide clear and informative log messages
+    logging.basicConfig(
+        level=app.config["LOGGING_LEVEL"],
+        format="%(levelname)s - %(message)s"
+    )
 
-# Register namespaces
-api.add_namespace(tfs_ns, path="/tfs")
-api.add_namespace(ixia_ns, path="/ixia")
-#gui_bp = Blueprint('gui', __name__, template_folder='templates')
+    # Create API instance
+    api = Api(
+        app,
+        version="1.0",
+        title="Network Slice Controller (NSC) API",
+        description="API for orchestrating and realizing transport network slice requests",
+        doc="/nsc"  # Swagger UI URL
+    )
 
-if WEBUI_DEPLOY:
-    app.secret_key = 'clave-secreta-dev' 
-    app.register_blueprint(gui_bp)
+    # Register namespaces
+    api.add_namespace(tfs_ns, path="/tfs")
+    api.add_namespace(ixia_ns, path="/ixia")
+    api.add_namespace(e2e_ns, path="/e2e")
 
+    if app.config["WEBUI_DEPLOY"]:
+        app.secret_key = "clave-secreta-dev"
+        app.register_blueprint(gui_bp)
+
+    return app
 
 if __name__ == "__main__":
+    app = create_app()
     app.run(host="0.0.0.0", port=NSC_PORT, debug=True)
diff --git a/deploy.sh b/deploy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8bd99bfca93f7a6126fec16290a73a8eab25fd78
--- /dev/null
+++ b/deploy.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+# Container name
+CONTAINER_NAME=nsc
+
+# Verify if docker is active
+if ! docker info > /dev/null 2>&1; then
+    echo "Error: Docker not running. Please, restart Docker service and try again."
+    exit 1
+fi
+
+# Stop container if running
+echo "Verify if '$CONTAINER_NAME' is running..."
+if [ $(docker ps -q -f name=$CONTAINER_NAME) ]; then
+    echo "Stopping current container '$CONTAINER_NAME'..."
+    docker stop $CONTAINER_NAME
+fi
+
+# Cleaning residual containers and images
+echo "Cleaning old Docker containers and images..."
+docker container prune -f
+docker image prune -f
+
+# Verify that .env.example exists
+if [ ! -f src/config/.env.example ]; then
+    echo "Error: .env.example not found"
+    exit 1
+fi
+
+# Copy .env.example to .env
+echo "Generating .env file..."
+cp src/config/.env.example .env
+
+# Read NSC_PORT from .env
+NSC_PORT=$(grep '^NSC_PORT=' .env | cut -d '=' -f2)
+
+# Docker build
+echo "Building docker image..."
+docker build -t nsc .
+
+# Executing nsc
+echo "Running nsc on port $NSC_PORT..."
+docker run -d --env-file .env -p $NSC_PORT:$NSC_PORT --name $CONTAINER_NAME $CONTAINER_NAME
+echo "---READY---"
diff --git a/images/NSC_Architecture.png b/images/NSC_Architecture.png
index 852437d55f3fadcb9c6a4303be7c70a264977e30..7abb89ba0da61ad538251f5537a7731e44b2319a 100644
Binary files a/images/NSC_Architecture.png and b/images/NSC_Architecture.png differ
diff --git a/requirements.txt b/requirements.txt
index e01b5584ed4de910f98767f741c470b1c05637d5..6e8674fea7327e70902540cd2b1847086d5d1176 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,25 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
 Flask
 flask-cors
 flask-restx
 netmiko
 requests
+pandas
+python-dotenv
+coverage
+pytest
diff --git a/scripts/show_logs_nsc.sh b/scripts/show_logs_nsc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..53a02fc5e5ee16c526ea5753501bba3e43688d05
--- /dev/null
+++ b/scripts/show_logs_nsc.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+docker logs nsc
\ No newline at end of file
diff --git a/src/Constants.py b/src/Constants.py
deleted file mode 100644
index 3b02ffd287c6eced608c00b993a71605fe53d0d4..0000000000000000000000000000000000000000
--- a/src/Constants.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-#     http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file includes original contributions from Telefonica Innovación Digital S.L.
-
-import logging, os, json
-
-# Default logging level
-DEFAULT_LOGGING_LEVEL = logging.INFO
-
-# Default port for NSC deployment
-NSC_PORT = 8081
-
-# Paths
-# Obtain the absolute path of the current file
-SRC_PATH = os.path.dirname(os.path.abspath(__file__))
-with open(os.path.join(SRC_PATH, 'IPs.json')) as f:
-    ips = json.load(f)
-
-# Create the path to the desired file relative to the current file
-TEMPLATES_PATH = os.path.join(SRC_PATH, "templates")
-
-# Dump templates
-DUMP_TEMPLATES = False
-
-# Mapper 
-
-# Flag to determine if the NSC performs NRPs
-NRP_ENABLED = False
-# Planner Flags
-PLANNER_ENABLED = True
-# Flag to determine if external PCE is used
-PCE_EXTERNAL = False
-
-# Realizer 
-
-# Controller Flags
-# If True, config is not sent to controllers
-DUMMY_MODE = False
-
-#####TERAFLOW#####
-# Teraflow IP
-TFS_IP = ips.get('TFS_IP')
-UPLOAD_TYPE = "WEBUI"  # "WEBUI" or "NBI"
-NBI_L2_PATH = "restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services"
-NBI_L3_PATH = "restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services"
-# Flag to determine if additional L2VPN configuration support is required for deploying L2VPNs with path selection
-TFS_L2VPN_SUPPORT = False
-
-#####IXIA#####
-# IXIA NEII IP
-IXIA_IP = ips.get('IXIA_IP')
-
-# WebUI
-
-# Flag to deploy the WebUI
-WEBUI_DEPLOY = True
\ No newline at end of file
diff --git a/src/api/main.py b/src/api/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cf60b81ebebba19ee02f8599db94644f91a6e98
--- /dev/null
+++ b/src/api/main.py
@@ -0,0 +1,216 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from src.utils.send_response import send_response
+import logging
+from flask import current_app
+from src.database.db import get_data, delete_data, get_all_data, delete_all_data
+from src.realizer.tfs.helpers.tfs_connector import tfs_connector
+from src.utils.safe_get import safe_get
+
+class Api:
+    def __init__(self, slice_service):
+        self.slice_service = slice_service
+    
+    def add_flow(self, intent):
+        """
+        Create a new transport network slice.
+
+        Args:
+            intent (dict): Network slice intent in 3GPP or IETF format
+
+        Returns:
+            Result of the Network Slice Controller (NSC) operation
+
+        API Endpoint:
+            POST /slice
+
+        Raises:
+            RuntimeError: If there is no content to process
+            Exception: For unexpected errors
+        """
+        try:
+            result = self.slice_service.nsc(intent)
+            if not result:
+                return send_response(False, code=404, message="No intents found")
+            logging.info(f"Slice created successfully")
+            return send_response(
+                True,
+                code=201,
+                data=result 
+            )
+        except RuntimeError as e:
+            # Handle case where there is no content to process
+            return send_response(False, code=200, message=str(e))
+        except Exception as e:
+            # Handle unexpected errors
+            return send_response(False, code=500, message=str(e))
+    
+    def get_flows(self,slice_id=None):
+        """
+        Retrieve transport network slice information.
+
+        This method allows retrieving:
+        - All transport network slices
+        - A specific slice by its ID
+
+        Args:
+            slice_id (str, optional): Unique identifier of a specific slice. 
+                                      Defaults to None.
+
+        Returns:
+            dict or list: 
+            - If slice_id is provided: Returns the specific slice details
+            - If slice_id is None: Returns a list of all slices
+            - Returns an error response if no slices are found
+
+        API Endpoint:
+            GET /slice/{id}
+
+        Raises:
+            ValueError: If no transport network slices are found
+            Exception: For unexpected errors
+        """
+        try:
+            # Read slice database from JSON file
+            content = get_all_data()
+            # If specific slice ID is provided, find and return matching slice
+            if slice_id:
+                for slice in content:
+                    if slice["slice_id"] == slice_id:
+                        return slice, 200
+                raise ValueError("Transport network slices not found")
+            # If no slices exist, raise an error
+            if len(content) == 0:
+                raise ValueError("Transport network slices not found")
+            
+            # Return all slices if no specific ID is given
+            return [slice for slice in content if slice.get("controller") == self.slice_service.controller_type], 200
+        
+        except ValueError as e:
+            # Handle case where no slices are found
+            return send_response(False, code=404, message=str(e))
+        except Exception as e:
+            # Handle unexpected errors
+            return send_response(False, code=500, message=str(e))
+    
+    def modify_flow(self,slice_id, intent):
+        """
+        Modify an existing transport network slice.
+
+        Args:
+            slice_id (str): Unique identifier of the slice to modify
+            intent (dict): New intent configuration for the slice
+
+        Returns:
+            Result of the Network Slice Controller (NSC) operation
+
+        API Endpoint:
+            PUT /slice/{id}
+        Raises:
+            Exception: For unexpected errors
+        """
+        try:
+            result = self.slice_service.nsc(intent, slice_id)
+            if not result:
+                return send_response(False, code=404, message="Slice not found")
+            logging.info(f"Slice {slice_id} modified successfully")
+            return send_response(
+                True,
+                code=200,
+                message="Slice modified successfully",
+                data=result
+            )
+        except ValueError as e:
+            # Handle case where no slices are found
+            return send_response(False, code=404, message=str(e))
+        except Exception as e:
+            # Handle unexpected errors
+            return send_response(False, code=500, message=str(e))
+    
+    def delete_flows(self, slice_id=None):
+        """
+        Delete transport network slice(s).
+
+        This method supports:
+        - Deleting a specific slice by ID
+        - Deleting all slices
+        - Optional cleanup of L2VPN configurations
+
+        Args:
+            slice_id (str, optional): Unique identifier of slice to delete. 
+                                      Defaults to None.
+
+        Returns:
+            dict: {} indicating successful deletion or error details
+
+        API Endpoint:
+            DELETE /slice/{id}
+
+        Raises:
+            ValueError: If no slices are found to delete
+            Exception: For unexpected errors
+
+        Notes:
+            - If controller_type is TFS, attempts to delete from Teraflow
+            - If need_l2vpn_support is True, performs additional L2VPN cleanup
+        """
+        try:
+            # Delete specific slice if slice_id is provided
+            if slice_id:
+                slice = get_data(slice_id)
+                # Raise error if slice not found
+                if not slice or slice.get("controller") != self.slice_service.controller_type:
+                    raise ValueError("Transport network slice not found")
+                # Delete in Teraflow
+                if not current_app.config["DUMMY_MODE"]:
+                    if self.slice_service.controller_type == "TFS":
+                        slice_type = safe_get(slice, ['intent', 'ietf-network-slice-service:network-slice-services', 'slice-service', 0, 'service-tags', 'tag-type', 0, 'tag-type-value', 0])
+                        if not slice_type:
+                            slice_type = "L2"
+                            logging.warning(f"Slice type not found in slice intent. Defaulting to L2")
+                        tfs_connector().nbi_delete(current_app.config["TFS_IP"],slice_type, slice_id)
+                # Update slice database
+                delete_data(slice_id)
+                logging.info(f"Slice {slice_id} removed successfully")
+                return {}, 204
+            
+            # Delete all slices
+            else:
+                # Optional: Delete in Teraflow if configured
+                if not current_app.config["DUMMY_MODE"]:
+                    if self.slice_service.controller_type == "TFS":
+                        content = get_all_data()
+                        for slice in content:
+                            if slice.get("controller") == self.slice_service.controller_type:
+                                slice_type = safe_get(slice, ['intent', 'ietf-network-slice-service:network-slice-services', 'slice-service', 0, 'service-tags', 'tag-type', 0, 'tag-type-value', 0])
+                                if not slice_type:
+                                    slice_type = "L2"
+                                    logging.warning(f"Slice type not found in slice intent. Defaulting to L2")
+                                tfs_connector().nbi_delete(current_app.config["TFS_IP"],slice_type, slice.get("slice_id"))
+                        if current_app.config["TFS_L2VPN_SUPPORT"]:
+                            self.slice_service.tfs_l2vpn_delete()
+
+                # Clear slice database
+                delete_all_data()
+
+                logging.info("All slices removed successfully")
+                return {}, 204
+        
+        except ValueError as e:
+            return send_response(False, code=404, message=str(e))
+        except Exception as e:
+            return send_response(False, code=500, message=str(e))
\ No newline at end of file
diff --git a/src/config/.env.example b/src/config/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..fb565b7443f1a3722aaab7a9b8fb7fc077cb027c
--- /dev/null
+++ b/src/config/.env.example
@@ -0,0 +1,69 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+# -------------------------
+# General
+# -------------------------
+NSC_PORT=8081
+# Options: CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+LOGGING_LEVEL=INFO
+DUMP_TEMPLATES=false
+
+# -------------------------
+# Mapper
+# -------------------------
+# Flag to determine if the NSC performs NRPs
+NRP_ENABLED=false
+# Planner Flags
+PLANNER_ENABLED=true
+# Flag to determine if external PCE is used
+PCE_EXTERNAL=false
+# Type of planner to be used. Options: ENERGY, HRAT, TFS_OPTICAL
+PLANNER_TYPE=ENERGY
+# HRAT
+HRAT_IP=10.0.0.1
+# TFS_OPTICAL
+OPTICAL_PLANNER_IP=10.0.0.1
+
+# -------------------------
+# Realizer
+# -------------------------
+# If true, no config sent to controllers
+DUMMY_MODE=true
+
+# -------------------------
+# Teraflow
+# -------------------------
+TFS_IP=127.0.0.1
+# Options: WEBUI or NBI
+UPLOAD_TYPE=WEBUI
+# Flag to determine if additional L2VPN configuration support is required for deploying L2VPNs with path selection
+TFS_L2VPN_SUPPORT=false
+
+# -------------------------
+# IXIA
+# -------------------------
+IXIA_IP=127.0.0.1
+
+# -------------------------
+# E2E Controller
+# -------------------------
+TFS_E2E_IP=127.0.0.1
+
+# -------------------------
+# WebUI
+# -------------------------
+WEBUI_DEPLOY=true
diff --git a/src/IPs.json b/src/config/IPs.json
similarity index 100%
rename from src/IPs.json
rename to src/config/IPs.json
diff --git a/src/config/config.py b/src/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..04b72aae571b67b8e490a01267c4af5aac4e12d9
--- /dev/null
+++ b/src/config/config.py
@@ -0,0 +1,67 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import os
+from dotenv import load_dotenv
+from flask import Flask
+import logging
+
+# Load .env file if present
+load_dotenv()
+
+LOG_LEVELS = {
+    "CRITICAL": logging.CRITICAL,
+    "ERROR": logging.ERROR,
+    "WARNING": logging.WARNING,
+    "INFO": logging.INFO,
+    "DEBUG": logging.DEBUG,
+    "NOTSET": logging.NOTSET,
+}
+
+def create_config(app: Flask):
+    """Load flags into Flask app.config"""
+    # Default logging level
+    app.config["LOGGING_LEVEL"] = LOG_LEVELS.get(os.getenv("LOGGING_LEVEL", "INFO").upper(),logging.INFO)
+
+    # Dump templates
+    app.config["DUMP_TEMPLATES"] = os.getenv("DUMP_TEMPLATES", "false").lower() == "true"
+
+    # Mapper
+    app.config["NRP_ENABLED"] = os.getenv("NRP_ENABLED", "false").lower() == "true"
+    app.config["PLANNER_ENABLED"] = os.getenv("PLANNER_ENABLED", "false").lower() == "true"
+    app.config["PLANNER_TYPE"] = os.getenv("PLANNER_TYPE", "ENERGY")
+    app.config["PCE_EXTERNAL"] = os.getenv("PCE_EXTERNAL", "false").lower() == "true"
+    app.config["HRAT_IP"] = os.getenv("HRAT_IP", "192.168.1.143")
+    app.config["OPTICAL_PLANNER_IP"] = os.getenv("OPTICAL_PLANNER_IP", "10.30.7.66")
+
+    # Realizer
+    app.config["DUMMY_MODE"] = os.getenv("DUMMY_MODE", "true").lower() == "true"
+
+    # Teraflow
+    app.config["TFS_IP"] = os.getenv("TFS_IP", "127.0.0.1")
+    app.config["UPLOAD_TYPE"] = os.getenv("UPLOAD_TYPE", "WEBUI")
+    app.config["TFS_L2VPN_SUPPORT"] = os.getenv("TFS_L2VPN_SUPPORT", "false").lower() == "true"
+
+    # IXIA
+    app.config["IXIA_IP"] = os.getenv("IXIA_IP", "127.0.0.1")
+
+    # E2E Controller
+    app.config["TFS_E2E_IP"] = os.getenv("TFS_E2E_IP", "127.0.0.1")
+
+    # WebUI
+    app.config["WEBUI_DEPLOY"] = os.getenv("WEBUI_DEPLOY", "false").lower() == "true"
+
+    return app
diff --git a/src/config/constants.py b/src/config/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3d6b8783c5b610357ab07e9a566d9c71b74afb1
--- /dev/null
+++ b/src/config/constants.py
@@ -0,0 +1,33 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+from pathlib import Path
+import os
+
+# Default port for NSC deployment
+NSC_PORT = int(os.getenv("NSC_PORT", "8081"))
+
+# Paths
+BASE_DIR = Path(__file__).resolve().parent.parent.parent
+SRC_PATH = BASE_DIR / "src"
+TEMPLATES_PATH = SRC_PATH / "templates"
+DATABASE_PATH = SRC_PATH / "database"
+CONFIG_PATH = SRC_PATH / "config"
+NBI_L2_PATH = "restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services"
+NBI_L3_PATH = "restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services"
+
+
+
diff --git a/src/database/db.py b/src/database/db.py
new file mode 100644
index 0000000000000000000000000000000000000000..341d79eb9e4f4f33487067b2890b0621872ef67c
--- /dev/null
+++ b/src/database/db.py
@@ -0,0 +1,195 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sqlite3, json, logging
+
+# Database file
+DB_NAME = "slice.db"
+
+# Initialize database and create table
+def init_db():
+    """
+    Initialize the SQLite database and create the slice table if not exists.
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("""
+        CREATE TABLE IF NOT EXISTS slice (
+            slice_id TEXT PRIMARY KEY,
+            intent TEXT NOT NULL,
+            controller TEXT NOT NULL
+        )
+    """)
+    conn.commit()
+    conn.close()
+
+# Save data to the database
+def save_data(slice_id: str, intent_dict: dict, controller: str):
+    """
+    Save a new slice entry to the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice
+        intent_dict (dict): Intent data
+        controller (str): Controller type
+    
+    Raises:
+        ValueError: If a slice with the given slice_id already exists
+    """
+    intent_str = json.dumps(intent_dict)
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    try:
+        cursor.execute("INSERT INTO slice (slice_id, intent, controller) VALUES (?, ?, ?)", (slice_id, intent_str, controller))
+        conn.commit()
+    # Handle duplicate slice ID
+    except sqlite3.IntegrityError:
+        raise ValueError(f"Slice with id '{slice_id}' already exists.")
+    finally:
+        conn.close()
+
+# Update data in the database
+def update_data(slice_id: str, new_intent_dict: dict, controller: str):
+    """
+    Update an existing slice entry in the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice
+        new_intent_dict (dict): New intent data
+        controller (str): Controller type
+    
+    Raises:
+        ValueError: If no slice is found with the given slice_id
+    """
+    intent_str = json.dumps(new_intent_dict)
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("UPDATE slice SET intent = ?, controller = ? WHERE slice_id = ?", (intent_str, controller, slice_id))
+    if cursor.rowcount == 0:
+        conn.close()
+        raise ValueError(f"No slice found with id '{slice_id}' to update.")
+    logging.debug(f"Slice '{slice_id}' updated.")
+    conn.commit()
+    conn.close()
+
+# Delete data from the database
+def delete_data(slice_id: str):
+    """
+    Delete a slice entry from the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice to delete
+    
+    Raises:
+        ValueError: If no slice is found with the given slice_id
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("DELETE FROM slice WHERE slice_id = ?", (slice_id,))
+    if cursor.rowcount == 0:
+        conn.close()
+        raise ValueError(f"No slice found with id '{slice_id}' to delete.")
+    logging.debug(f"Slice '{slice_id}' deleted.")
+    conn.commit()
+    conn.close()
+
+# Get data from the database
+def get_data(slice_id: str) -> dict:
+    """
+    Retrieve a specific slice entry from the database.
+
+    Args:
+        slice_id (str): Unique identifier for the slice to retrieve
+    
+    Returns:
+        dict: Slice data including slice_id, intent (as dict), and controller
+    
+    Raises:
+        ValueError: If no slice is found with the given slice_id
+        Exception: For JSON decoding errors
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("SELECT * FROM slice WHERE slice_id = ?", (slice_id,))
+    row = cursor.fetchone()
+    conn.close()
+
+    if row:
+        column_names = [description[0] for description in cursor.description]
+        result = dict(zip(column_names, row))
+        if isinstance(result.get("intent"), str):
+            try:
+                result["intent"] = json.loads(result["intent"])
+            except json.JSONDecodeError:
+                raise Exception("Warning: 'intent' is not a valid JSON string.")
+        return result
+
+    else:
+        raise ValueError(f"No slice found with id '{slice_id}'.")
+
+# Get all slices
+def get_all_data() -> list[dict]:
+    """
+    Retrieve all slice entries from the database.
+
+    Returns:
+        list: List of slice data dictionaries including slice_id, intent (as dict), and controller
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("SELECT * FROM slice")
+    rows = cursor.fetchall()
+    conn.close()
+    return [
+        {
+        "slice_id": row[0],
+        "intent": json.loads(row[1]),
+        "controller": row[2] 
+        }
+    for row in rows
+    ]
+
+def delete_all_data():
+    """
+    Delete all slice entries from the database.
+    """
+    conn = sqlite3.connect(DB_NAME)
+    cursor = conn.cursor()
+    cursor.execute("DELETE FROM slice")
+    conn.commit()
+    conn.close()
+    logging.debug("All slice data deleted.")
+
+# Example usage
+if __name__ == "__main__":
+    init_db()
+
+    # Save a slice
+    test_intent = {"bandwidth": "1Gbps", "latency": "10ms", "provider": "opensec"}
+    save_data("slice-001", test_intent, "TFS")
+
+    # Get the slice
+    result = get_data("slice-001")
+    if result:
+        print(f"Retrieved intent for slice-001: {result}")
+
+    # Update the slice
+    updated_intent = {"bandwidth": "2Gbps", "latency": "5ms", "provider": "opensec"}
+    update_data("slice-001", updated_intent, "TFS")
+
+    # Delete the slice
+    delete_data("slice-001")
+
+    get_all_data()
+    delete_all_data()
diff --git a/src/nrp_ddbb.json b/src/database/nrp_ddbb.json
similarity index 91%
rename from src/nrp_ddbb.json
rename to src/database/nrp_ddbb.json
index 948967ef9fd1a9389ac634b19255857a5e13d3aa..1616438516aabb21393b339ee45bf7dc637803c2 100644
--- a/src/nrp_ddbb.json
+++ b/src/database/nrp_ddbb.json
@@ -6,12 +6,12 @@
             {
               "metric-type": "one-way-bandwidth",
               "metric-unit": "kbps",
-              "bound": 1
+              "bound": 100000000000
             },
             {
               "metric-type": "one-way-delay-maximum",
               "metric-unit": "milliseconds",
-              "bound": 800
+              "bound": 1
             }
           ],
         "slices": ["slice-service-02873501-bf0a-4b02-8540-2f9d970ea20f", "slice-service-e3b22fa8-f3da-4da8-881b-c66e5161b4a5"],
@@ -24,12 +24,12 @@
           {
             "metric-type": "one-way-bandwidth",
             "metric-unit": "kbps",
-            "bound": 1
+            "bound": 10000000000000
           },
           {
             "metric-type": "one-way-delay-maximum",
             "metric-unit": "milliseconds",
-            "bound": 800
+            "bound": 2
           }
         ],
       "slices": ["slice-service-02873501-bf0a-4b02-8540-2f9d970ea20f", "slice-service-e3b22fa8-f3da-4da8-881b-c66e5161b4a5"],
diff --git a/src/database/store_data.py b/src/database/store_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fccbfb6c61878ac5658b9c8570332dbab263075
--- /dev/null
+++ b/src/database/store_data.py
@@ -0,0 +1,38 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from src.database.db import save_data, update_data
+
+def store_data(intent, slice_id, controller_type=None):
+    """
+    Store network slice intent information in a JSON database file.
+
+    This method:
+    1. Creates a JSON file if it doesn't exist
+    2. Reads existing content
+    3. Updates or adds new slice intent information
+
+    Args:
+        intent (dict): Network slice intent to be stored
+        slice_id (str or None): Existing slice ID to update; if falsy, a new entry is created from the intent's own ID.
+    """
+    # Update or add new slice intent
+    if slice_id:
+        update_data(slice_id, intent, controller_type)
+    else:
+        # Add new slice intent
+        slice_id = intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        save_data(slice_id, intent, controller_type)
\ No newline at end of file
diff --git a/src/main.py b/src/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ef517d17df077bbfdbc9992ac0a90c3820dfdf2
--- /dev/null
+++ b/src/main.py
@@ -0,0 +1,129 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging
+import time
+from src.utils.dump_templates import dump_templates
+from src.utils.build_response import build_response
+from src.nbi_processor.main import nbi_processor
+from src.database.store_data import store_data
+from src.mapper.main import mapper
+from src.realizer.main import realizer
+from src.realizer.send_controller import send_controller
+
+class NSController:
+    """
+    Network Slice Controller (NSC) - A class to manage network slice creation,
+    modification, and deletion across different network domains.
+
+    This controller handles the translation, mapping, and realization of network
+    slice intents from different formats (3GPP and IETF) to network-specific
+    configurations.
+
+    Key Functionalities:
+    - Intent Processing: Translate and process network slice intents
+    - Slice Management: Create, modify, and delete network slices
+    - NRP (Network Resource Partition) Mapping: Match slice requirements with available resources
+    - Slice Realization: Convert intents to specific network configurations (L2VPN, L3VPN)
+    """
+
+    def __init__(self, controller_type = "TFS"):
+        """
+        Initialize the Network Slice Controller.
+
+        Args:
+            controller_type (str): Flag to determine if configurations
+                should be uploaded to Teraflow or IXIA system.
+
+        Attributes:
+            controller_type (str): Flag for Teraflow or Ixia upload
+            response (dict): Stores slice creation responses
+            start_time (float): Tracks slice setup start time
+            end_time (float): Tracks slice setup end time
+            setup_time (float): Total time taken for slice setup in milliseconds
+        """
+        self.controller_type = controller_type
+
+        self.path = ""
+        self.response = []
+        self.start_time = 0
+        self.end_time = 0
+        self.setup_time = 0
+
+    def nsc(self, intent_json, slice_id=None):
+        """
+        Main Network Slice Controller method to process and realize network slice intents.
+
+        Workflow:
+        1. Load IETF template
+        2. Process intent (detect format, translate if needed)
+        3. Extract slice data
+        4. Store slice information
+        5. Map slice to Network Resource Partition (NRP)
+        6. Realize slice configuration
+        7. Send configuration to network controllers
+
+        Args:
+            intent_json (dict): Network slice intent in 3GPP or IETF format
+            slice_id (str, optional): Existing slice identifier for modification
+
+        Returns:
+            dict: Contains slice creation responses and setup time in milliseconds
+
+        """
+        # Start performance tracking
+        self.start_time = time.perf_counter()
+
+        # Reset requests
+        requests = {"services":[]}
+        response = None
+
+        # Process intent (translate if 3GPP)
+        ietf_intents = nbi_processor(intent_json)
+
+        for intent in ietf_intents:
+            # Mapper
+            rules = mapper(intent)
+            # Build response
+            self.response = build_response(intent, self.response, controller_type= self.controller_type)
+            # Realizer
+            request = realizer(intent, controller_type=self.controller_type, response = self.response, rules = rules)
+            # Store slice request details
+            if request: 
+                requests["services"].append(request)
+                store_data(intent, slice_id, controller_type=self.controller_type)
+
+        # Store the generated template for debugging
+        dump_templates(intent_json, ietf_intents, requests)
+
+        # Check if there are services to process
+        if not requests.get("services"):
+            raise RuntimeError("No service to process.")
+
+        # Send config to controllers
+        response = send_controller(self.controller_type, requests)
+
+        if not response:
+            raise Exception("Controller upload failed")
+
+        # End performance tracking
+        self.end_time = time.perf_counter()
+        setup_time = (self.end_time - self.start_time) * 1000
+
+        return {
+            "slices": self.response,
+            "setup_time": setup_time
+        }
\ No newline at end of file
diff --git a/src/mapper/main.py b/src/mapper/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e0a4c7aefbd535b692d30c24a925ffee74b9b9d
--- /dev/null
+++ b/src/mapper/main.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from src.planner.planner import Planner
+from .slo_viability import slo_viability
+from src.realizer.main import realizer
+from flask import current_app
+
+def mapper(ietf_intent):
+    """
+    Map an IETF network slice intent to the most suitable Network Resource Partition (NRP).
+
+    This method:
+    1. If NRP is enabled, retrieves the current NRP view
+    2. Extracts Service Level Objectives (SLOs) from the intent
+    3. Finds NRPs that can meet the SLO requirements
+    4. Selects the best NRP based on viability and availability
+    5. Attaches the slice to the selected NRP or creates a new one
+    6. If planner is enabled, computes the optimal path for the slice
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+
+    Returns:
+        dict or None: Optimal path if planner is enabled; otherwise, None.
+    """
+    if current_app.config["NRP_ENABLED"]:
+        # Retrieve NRP view
+        nrp_view = realizer(None, True, "READ")
+
+        # Extract Service Level Objectives (SLOs) from the intent
+        slos = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
+        if slos:
+            # Find candidate NRPs that can meet the SLO requirements
+            candidates = [
+                (nrp, slo_viability(slos, nrp)[1])
+                for nrp in nrp_view
+                if slo_viability(slos, nrp)[0] and nrp["available"]
+            ]
+            logging.debug(f"Candidates: {candidates}")
+
+            # Select the best NRP based on candidates
+            best_nrp = max(candidates, key=lambda x: x[1])[0] if candidates else None
+            logging.debug(f"Best NRP: {best_nrp}")
+
+            if best_nrp:
+                best_nrp["slices"].append(ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"])
+                # Update NRP view
+                realizer(ietf_intent, True, "UPDATE")
+                # TODO Here we should put how the slice is attached to an already created nrp
+            else:
+                # Request the controller to create a new NRP that meets the SLOs
+                answer = realizer(ietf_intent, True, "CREATE", best_nrp)
+                if not answer:
+                    logging.error("Slice rejected due to lack of NRPs")
+                    return None
+                # TODO Here we should put how the slice is attached to the new nrp
+
+    if current_app.config["PLANNER_ENABLED"]:
+        optimal_path = Planner().planner(ietf_intent, current_app.config["PLANNER_TYPE"])
+        logging.debug(f"Optimal path: {optimal_path}")
+        return optimal_path
+    return None
\ No newline at end of file
diff --git a/src/mapper/slo_viability.py b/src/mapper/slo_viability.py
new file mode 100644
index 0000000000000000000000000000000000000000..a91b9296e6d67c0270e232faea491bb7f7d1a914
--- /dev/null
+++ b/src/mapper/slo_viability.py
@@ -0,0 +1,64 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+
+def slo_viability(slice_slos, nrp_slos):
+    """
+    Compare Service Level Objectives (SLOs) between a slice and a Network Resource Partition (NRP).
+
+    This method assesses whether an NRP can satisfy the SLOs of a network slice.
+
+    Args:
+        slice_slos (list): Service Level Objectives of the slice
+        nrp_slos (dict): Service Level Objectives of the Network Resource Partition (NRP)
+
+    Returns:
+        tuple: A boolean indicating viability and a flexibility score
+            - First value: True if NRP meets SLOs, False otherwise
+            - Second value: A score representing how well the NRP meets the SLOs
+    """
+    # Define SLO types for maximum and minimum constraints
+    slo_type = {
+        "max": ["one-way-delay-maximum", "two-way-delay-maximum", "one-way-delay-percentile", "two-way-delay-percentile",
+                "one-way-delay-variation-maximum", "two-way-delay-variation-maximum",
+                "one-way-delay-variation-percentile", "two-way-delay-variation-percentile",
+                "one-way-packet-loss", "two-way-packet-loss"],
+        "min": ["one-way-bandwidth", "two-way-bandwidth", "shared-bandwidth"]
+    }
+    score = 0
+    flexibility_scores = []
+    for slo in slice_slos:
+        for nrp_slo in nrp_slos['slos']:
+            if slo["metric-type"] == nrp_slo["metric-type"]:
+                # Handle maximum type SLOs
+                if slo["metric-type"] in slo_type["max"]:
+                    logging.debug(f"SLO: {slo}, NRP SLO: {nrp_slo}")
+                    flexibility = (slo["bound"] - nrp_slo["bound"]) / slo["bound"]
+                    if slo["bound"] < nrp_slo["bound"]:
+                        return False, 0  # Does not meet maximum constraint
+                # Handle minimum type SLOs
+                if slo["metric-type"] in slo_type["min"]:
+                    logging.debug(f"SLO: {slo}, NRP SLO: {nrp_slo}")
+                    flexibility = (nrp_slo["bound"] - slo["bound"]) / slo["bound"]
+                    if slo["bound"] > nrp_slo["bound"]:
+                        return False, 0  # Does not meet minimum constraint
+                flexibility_scores.append(flexibility)
+                break  # Exit inner loop after finding matching metric
+        
+        # Calculate final viability score
+        score = sum(flexibility_scores) / len(flexibility_scores) if flexibility_scores else 0
+    return True, score  # All checks passed, so the NRP is viable
\ No newline at end of file
diff --git a/src/nbi_processor/detect_format.py b/src/nbi_processor/detect_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b48be3eb90ef412079caf81092758333aff4a03
--- /dev/null
+++ b/src/nbi_processor/detect_format.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+def detect_format(json_data):    
+    """
+    Detect the format of the input network slice intent.
+
+    This method identifies whether the input JSON is in 3GPP or IETF format 
+    by checking for specific keys in the JSON structure.
+
+    Args:
+        json_data (dict): Input network slice intent JSON
+
+    Returns:
+        str or None: 
+            - "IETF" if IETF-specific keys are found
+            - "3GPP" if 3GPP-specific keys are found
+            - None if no recognizable format is detected
+    """
+    # Check for IETF-specific key
+    if "ietf-network-slice-service:network-slice-services" in json_data:
+        return "IETF"
+    # Check for 3GPP-specific keys
+    if any(key in json_data for key in ["NetworkSlice1", "TopSliceSubnet1", "CNSliceSubnet1", "RANSliceSubnet1"]):
+        return "3GPP"
+    
+    return None
\ No newline at end of file
diff --git a/src/nbi_processor/main.py b/src/nbi_processor/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfa55ddfe27b0433a84fe69fcfc30d3b7e6fabf4
--- /dev/null
+++ b/src/nbi_processor/main.py
@@ -0,0 +1,56 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .detect_format import detect_format
+from .translator import translator
+
+def nbi_processor(intent_json):
+    """
+    Process and translate network slice intents from different formats (3GPP or IETF).
+
+    This method detects the input JSON format and converts 3GPP intents to IETF format.
+
+    Args:
+        intent_json (dict): Input network slice intent in either 3GPP or IETF format.
+
+    Returns:
+        list: A list of IETF-formatted network slice intents.
+
+    Raises:
+        ValueError: If the JSON request format is not recognized.
+    """
+    # Detect the input JSON format (3GPP or IETF)
+    format = detect_format(intent_json)
+    ietf_intents = []
+
+    # TODO Needs to be generalized to support different names of slicesubnets
+    # Process different input formats
+    if format == "3GPP":
+        # Translate each subnet in 3GPP format to IETF format
+        for subnet in intent_json["RANSliceSubnet1"]["networkSliceSubnetRef"]:
+            ietf_intents.append(translator(intent_json, subnet))
+        logging.info(f"3GPP requests translated to IETF template")
+    elif format == "IETF":
+        # If already in IETF format, add directly
+        logging.info(f"IETF intent received")
+        ietf_intents.append(intent_json)
+    else:
+        # Handle unrecognized format
+        logging.error(f"JSON request format not recognized")
+        raise ValueError("JSON request format not recognized")
+    
+    return ietf_intents or None
\ No newline at end of file
diff --git a/src/nbi_processor/translator.py b/src/nbi_processor/translator.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f23d19ecca78b635d66be1c969f95808a3ebdff
--- /dev/null
+++ b/src/nbi_processor/translator.py
@@ -0,0 +1,107 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import uuid, os
+from src.utils.load_template import load_template
+from src.config.constants import TEMPLATES_PATH
+
+def translator(gpp_intent, subnet):
+    """
+    Translate a 3GPP network slice intent to IETF format.
+
+    This method converts a 3GPP intent into a standardized IETF intent template, 
+    mapping key parameters such as QoS profiles, service endpoints, and connection details.
+
+    Notes:
+    - Generates a unique slice service ID using UUID
+    - Maps QoS requirements, source/destination endpoints
+    - Logs the translated intent to a JSON file for reference
+
+    Args:
+        gpp_intent (dict): Original 3GPP network slice intent
+        subnet (str): Specific subnet reference within the 3GPP intent
+
+    Returns:
+        dict: Translated IETF-formatted network slice intent
+    """
+    # Load IETF template and create a copy to modify
+    ietf_i = load_template(os.path.join(TEMPLATES_PATH, "ietf_template_empty.json"))
+
+    # Extract endpoint transport objects
+    ep_transport_objects = gpp_intent[subnet]["EpTransport"]
+
+    # Populate template with SLOs (currently supporting QoS profile, latency and bandwidth)
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = gpp_intent[ep_transport_objects[0]]["qosProfile"]
+    
+    profile = gpp_intent.get(subnet, {}).get("SliceProfileList", [{}])[0].get("RANSliceSubnetProfile", {})
+
+
+    metrics = {
+        ("uLThptPerSliceSubnet", "MaxThpt"): ("one-way-bandwidth", "kbps"),
+        ("uLLatency",): ("one-way-delay-maximum", "milliseconds"),
+        ("EnergyConsumption",): ("energy_consumption", "Joules"),
+        ("EnergyEfficiency",): ("energy_efficiency", "W/bps"),
+        ("CarbonEmissions",): ("carbon_emission", "gCO2eq"),
+        ("RenewableEnergyUsage",): ("renewable_energy_usage", "rate")
+    }
+
+    # Aux
+    def get_nested(d, keys):
+        for k in keys:
+            if isinstance(d, dict) and k in d:
+                d = d[k]
+            else:
+                return None
+        return d
+
+    for key_path, (metric_type, metric_unit) in metrics.items():
+        value = get_nested(profile, key_path)
+        if value is not None:
+            ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]\
+                ["slo-sle-template"][0]["slo-policy"]["metric-bound"].append({
+                "metric-type": metric_type,
+                "metric-unit": metric_unit,
+                "bound": value
+            })
+
+
+    # Generate unique slice service ID and description
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] = f"slice-service-{uuid.uuid4()}"
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = f"Transport network slice mapped with 3GPP slice {next(iter(gpp_intent))}"
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["slo-sle-policy"]["slo-sle-template"] = ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
+    
+    # Configure Source SDP
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["node-id"] = ep_transport_objects[0].split(" ", 1)[1]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[0]]["EpApplicationRef"][0]]["localAddress"]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceType"]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceId"]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[0]]["IpAddress"] 
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[0]]["NextHopInfo"] 
+
+    # Configure Destination SDP
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["node-id"] = ep_transport_objects[1].split(" ", 1)[1]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[1]]["EpApplicationRef"][0]]["localAddress"]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceType"]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceId"]
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[1]]["IpAddress"] 
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[1]]["NextHopInfo"] 
+
+    # Configure Connection Group and match-criteria
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["connection-groups"]["connection-group"][0]["id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
+    ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
+
+    return ietf_i
\ No newline at end of file
diff --git a/src/network_slice_controller.py b/src/network_slice_controller.py
deleted file mode 100644
index 6ac70885c872dda4d18e919a602abd4f1a15c870..0000000000000000000000000000000000000000
--- a/src/network_slice_controller.py
+++ /dev/null
@@ -1,1259 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-#     http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file includes original contributions from Telefonica Innovación Digital S.L.
-
-import json, time, os, logging, uuid, traceback, sys
-from datetime import datetime
-from src.helpers import tfs_connector, cisco_connector
-from src.Constants import DEFAULT_LOGGING_LEVEL, TFS_IP, TFS_L2VPN_SUPPORT, IXIA_IP, SRC_PATH, TEMPLATES_PATH, DUMMY_MODE, DUMP_TEMPLATES, PLANNER_ENABLED, NRP_ENABLED, UPLOAD_TYPE, NBI_L2_PATH, NBI_L3_PATH
-from src.realizers.ixia.NEII_V4 import NEII_controller
-from src.planner.planner import Planner
-
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
-    level=DEFAULT_LOGGING_LEVEL,
-    format='%(levelname)s - %(message)s')
-
-class NSController:
-    """
-    Network Slice Controller (NSC) - A class to manage network slice creation, 
-    modification, and deletion across different network domains.
-
-    This controller handles the translation, mapping, and realization of network 
-    slice intents from different formats (3GPP and IETF) to network-specific 
-    configurations.
-
-    Key Functionalities:
-    - Intent Processing: Translate and process network slice intents
-    - Slice Management: Create, modify, and delete network slices
-    - NRP (Network Resource Partition) Mapping: Match slice requirements with available resources
-    - Slice Realization: Convert intents to specific network configurations (L2VPN, L3VPN)
-    """
-
-    def __init__(self, controller_type = "TFS", tfs_ip=TFS_IP, ixia_ip =IXIA_IP, need_l2vpn_support=TFS_L2VPN_SUPPORT): 
-        """
-        Initialize the Network Slice Controller.
-
-        Args:
-            controller_type (str): Flag to determine if configurations 
-                should be uploaded to Teraflow or IXIA system.
-            need_l2vpn_support (bool, optional): Flag to determine if additional
-                L2VPN configuration support is required. Defaults to False.
-        
-        Attributes:
-            controller_type (str): Flag for Teraflow or Ixia upload
-            answer (dict): Stores slice creation responses
-            start_time (float): Tracks slice setup start time
-            end_time (float): Tracks slice setup end time
-            need_l2vpn_support (bool): Flag for additional L2VPN configuration support
-        """
-        self.controller_type = controller_type
-        self.tfs_ip = tfs_ip
-        self.path = ""
-        self.answer = {}
-        self.cool_answer = {}
-        self.start_time = 0
-        self.end_time = 0
-        self.setup_time = 0
-        self.need_l2vpn_support = need_l2vpn_support
-        # Internal templates and views
-        self.__gpp_template = ""
-        self.__ietf_template = ""
-        self.__teraflow_template = ""
-        self.__nrp_view = ""
-        self.subnet=""
-
-    # API Methods
-    def add_flow(self, intent):
-        """
-        Create a new transport network slice.
-
-        Args:
-            intent (dict): Network slice intent in 3GPP or IETF format
-
-        Returns:
-            Result of the Network Slice Controller (NSC) operation
-
-        API Endpoint:
-            POST /slice
-
-        Raises:
-            ValueError: If no transport network slices are found
-            Exception: For unexpected errors during slice creation process
-        """
-        return self.nsc(intent)
-
-    def get_flows(self,slice_id=None):
-        """
-        Retrieve transport network slice information.
-
-        This method allows retrieving:
-        - All transport network slices
-        - A specific slice by its ID
-
-        Args:
-            slice_id (str, optional): Unique identifier of a specific slice. 
-                                      Defaults to None.
-
-        Returns:
-            dict or list: 
-            - If slice_id is provided: Returns the specific slice details
-            - If slice_id is None: Returns a list of all slices
-            - Returns an error response if no slices are found
-
-        API Endpoint:
-            GET /slice/{id}
-
-        Raises:
-            ValueError: If no transport network slices are found
-            Exception: For unexpected errors during file processing
-        """
-        try:
-            # Read slice database from JSON file
-            with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'r') as file:
-                content = json.load(file)
-            # If specific slice ID is provided, find and return matching slice
-            if slice_id:
-                for slice in content:
-                    if slice["slice_id"] == slice_id:
-                        return slice
-            # If no slices exist, raise an error
-            if len(content) == 0:
-                raise ValueError("Transport network slices not found")
-            
-            # Return all slices if no specific ID is given
-            return [slice for slice in content if slice.get("controller") == self.controller_type]
-        
-        except ValueError as e:
-            # Handle case where no slices are found
-            return self.__send_response(False, code=404, message=str(e))
-        except Exception as e:
-            # Handle unexpected errors
-            return self.__send_response(False, code=500, message=str(e))
-
-    def modify_flow(self,slice_id, intent):
-        """
-        Modify an existing transport network slice.
-
-        Args:
-            slice_id (str): Unique identifier of the slice to modify
-            intent (dict): New intent configuration for the slice
-
-        Returns:
-            Result of the Network Slice Controller (NSC) operation
-
-        API Endpoint:
-            PUT /slice/{id}
-        """
-        return self.nsc(intent, slice_id)
-
-    def delete_flows(self, slice_id=None):
-        """
-        Delete transport network slice(s).
-
-        This method supports:
-        - Deleting a specific slice by ID
-        - Deleting all slices
-        - Optional cleanup of L2VPN configurations
-
-        Args:
-            slice_id (str, optional): Unique identifier of slice to delete. 
-                                      Defaults to None.
-
-        Returns:
-            dict: Response indicating successful deletion or error details
-
-        API Endpoint:
-            DELETE /slice/{id}
-
-        Raises:
-            ValueError: If no slices are found to delete
-            Exception: For unexpected errors during deletion process
-
-        Notes:
-            - If controller_type is TFS, attempts to delete from Teraflow
-            - If need_l2vpn_support is True, performs additional L2VPN cleanup
-        """
-        try:
-            # Read current slice database
-            with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'r') as file:
-                content = json.load(file)
-            id = None
-
-            # Delete specific slice if slice_id is provided
-            if slice_id:
-                for i, slice in enumerate(content):
-                    if slice["slice_id"] == slice_id and slice.get("controller") == self.controller_type:
-                        del content[i]
-                        id = i
-                        break
-                # Raise error if slice not found
-                if id is None:
-                    raise ValueError("Transport network slice not found")
-                # Update slice database
-                with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'w') as file:
-                    json.dump(content, file, indent=4)
-                logging.info(f"Slice {slice_id} removed successfully")
-                return self.__send_response(False, code=200, status="success", message=f"Transpor network slice {slice_id} deleted successfully")
-            
-            # Delete all slices
-            else:
-                # Optional: Delete in Teraflow if configured
-                if self.controller_type == "TFS":
-                    # TODO: should send a delete request to Teraflow
-                    if self.need_l2vpn_support:
-                        self.__tfs_l2vpn_delete()
-
-                data_removed = [slice for slice in content if slice.get("controller") == self.controller_type] 
-
-                # Verify slices exist before deletion
-                if len(data_removed) == 0:
-                    raise ValueError("Transport network slices not found")
-
-                filtered_data = [slice for slice in content if slice.get("controller") != self.controller_type]    
-                # Clear slice database
-                with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'w') as file:
-                    json.dump(filtered_data, file, indent=4)
-
-                logging.info("All slices removed successfully")
-                return self.__send_response(False, code=200, status="success", message="All transport network slices deleted successfully.")
-        
-        except ValueError as e:
-            return self.__send_response(False, code=404, message=str(e))
-        except Exception as e:
-            return self.__send_response(False, code=500, message=str(e))
-
-    # Main NSC Functionalities    
-    def nsc(self, intent_json, slice_id=None):
-        """
-        Main Network Slice Controller method to process and realize network slice intents.
-
-        Workflow:
-        1. Load IETF template
-        2. Process intent (detect format, translate if needed)
-        3. Extract slice data
-        4. Store slice information
-        5. Map slice to Network Resource Pool (NRP)
-        6. Realize slice configuration
-        7. Upload to Teraflow (optional)
-
-        Args:
-            intent_json (dict): Network slice intent in 3GPP or IETF format
-            slice_id (str, optional): Existing slice identifier for modification
-
-        Returns:
-            tuple: Response status and HTTP status code
-        
-        """
-        try:
-            # Start performance tracking
-            self.start_time = time.perf_counter()
-
-            # Reset requests and load IETF template
-            self.__load_template(1, os.path.join(TEMPLATES_PATH, "ietf_template_empty.json"))  
-            requests = {"services":[]}
-
-            # Store the received template for debugging
-            if DUMP_TEMPLATES:
-                with open(os.path.join(TEMPLATES_PATH, "nbi_template.json"), "w") as file:
-                    file.write(json.dumps(intent_json,indent=2))
-            
-            # Process intent (translate if 3GPP)
-            ietf_intents = self.__nbi_processor(intent_json)
-
-            # Store the generated template for debugging
-            if DUMP_TEMPLATES:
-                with open(os.path.join(TEMPLATES_PATH, "ietf_template.json"), "w") as file:
-                    file.write(json.dumps(ietf_intents,indent=2))
-
-            if ietf_intents:
-                for intent in ietf_intents:
-                     # Extract and store slice request details
-                    self.__extract_data(intent)
-                    self.__store_data(intent, slice_id)       
-                    # Mapper
-                    self.__mapper(intent)
-                    # Realizer
-                    tfs_request = self.__realizer(intent)
-                    requests["services"].append(tfs_request)
-            else:
-                return self.__send_response(False, code=404, message="No intents found")
-            
-            # Store the generated template for debugging
-            if DUMP_TEMPLATES:
-                with open(os.path.join(TEMPLATES_PATH, "realizer_template.json"), "w") as archivo:
-                    archivo.write(json.dumps(requests,indent=2))
-            
-            # Optional: Upload template to Teraflow
-            if not DUMMY_MODE:
-                if self.controller_type == "TFS":
-                    if UPLOAD_TYPE == "WEBUI":
-                        response = tfs_connector().webui_post(self.tfs_ip, requests)
-                    elif UPLOAD_TYPE == "NBI":
-                        for intent in requests["services"]:
-                            # Send each separate NBI request
-                            response = tfs_connector().nbi_post(self.tfs_ip, intent, self.path)
-
-                            if not response.ok:
-                                return self.__send_response(False, code=response.status_code, message=f"Teraflow upload failed. Response: {response.text}")
-                    
-                    # For deploying an L2VPN with path selection (not supported by Teraflow)
-                    if self.need_l2vpn_support:
-                        self.__tfs_l2vpn_support(requests["services"])
-
-                    logging.info("Request sent to Teraflow")
-                elif self.controller_type == "IXIA":
-                    neii_controller = NEII_controller()
-                    for intent in requests["services"]:
-                        # Send each separate IXIA request
-                        neii_controller.nscNEII(intent)
-                    logging.info("Requests sent to Ixia")
-
-            # End performance tracking
-            self.end_time = time.perf_counter()
-            return self.__send_response(True, code=200)
-
-        except ValueError as e:
-            return self.__send_response(False, code=400, message=str(e))
-        except Exception as e:
-            return self.__send_response(False, code=500, message=str(e))
-        
-    def __nbi_processor(self, intent_json):
-        """
-        Process and translate network slice intents from different formats (3GPP or IETF).
-
-        This method detects the input JSON format and converts 3GPP intents to IETF format.
-        Supports multiple slice subnets in 3GPP format.
-
-        Args:
-            intent_json (dict): Input network slice intent in either 3GPP or IETF format.
-
-        Returns:
-            list: A list of IETF-formatted network slice intents.
-
-        Raises:
-            ValueError: If the JSON request format is not recognized.
-        """
-        # Detect the input JSON format (3GPP or IETF)
-        format = self.__detect_format(intent_json)
-        ietf_intents = []
-
-        # TODO Needs to be generalized to support different names of slicesubnets
-        # Process different input formats
-        if format == "3GPP":
-            # Translate each subnet in 3GPP format to IETF format
-            for subnet in intent_json["RANSliceSubnet1"]["networkSliceSubnetRef"]:
-                ietf_intents.append(self.__translator(intent_json, subnet))
-            logging.info(f"3GPP requests translated to IETF template")
-        elif format == "IETF":
-            # If already in IETF format, add directly
-            logging.info(f"IETF intent received")
-            ietf_intents.append(intent_json)
-        else:
-            # Handle unrecognized format
-            logging.error(f"JSON request format not recognized")
-            raise ValueError("JSON request format not recognized")
-        
-        return ietf_intents
-
-    def __mapper(self, ietf_intent):
-        """
-        Map an IETF network slice intent to the most suitable Network Resource Partition (NRP).
-
-        This method:
-        1. Retrieves the current NRP view
-        2. Extracts Service Level Objectives (SLOs) from the intent
-        3. Finds NRPs that can meet the SLO requirements
-        4. Selects the best NRP based on viability and availability
-        5. Attaches the slice to the selected NRP or creates a new one
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Raises:
-            Exception: If no suitable NRP is found and slice creation fails.
-        """ 
-        if NRP_ENABLED:
-            # Retrieve NRP view
-            self.__realizer(None, True, "READ")
-
-            # Extract Service Level Objectives (SLOs) from the intent
-            slos = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
-
-            if slos:
-                # Find candidate NRPs that can meet the SLO requirements
-                candidates = [
-                    (nrp, self.__slo_viability(slos, nrp)[1]) 
-                    for nrp in self.__nrp_view 
-                    if self.__slo_viability(slos, nrp)[0] and nrp["available"]
-                ]
-                logging.debug(f"Candidates: {candidates}")
-
-                # Select the best NRP based on candidates
-                best_nrp = max(candidates, key=lambda x: x[1])[0] if candidates else None
-                logging.debug(f"Best NRP: {best_nrp}")
-
-                if best_nrp:
-                    best_nrp["slices"].append(ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"])
-                    # Update NRP view
-                    self.__realizer(ietf_intent, True, "UPDATE")
-                    # TODO Here we should put how the slice is attached to an already created nrp
-                else: 
-                    # Request the controller to create a new NRP that meets the SLOs
-                    answer = self.__realizer(ietf_intent, True, "CREATE", best_nrp)
-                    if not answer:
-                        raise Exception("Slice rejected due to lack of NRPs") 
-                    # TODO Here we should put how the slice is attached to the new nrp
-        
-        if PLANNER_ENABLED:
-            optimal_path = Planner().planner(ietf_intent)
-
-            logging.info(f"Optimal path: {optimal_path}")
-
-    def __realizer(self, ietf_intent, need_nrp=False, order=None, nrp=None):
-        """
-        Manage the slice creation workflow.
-
-        This method handles two primary scenarios:
-        1. Interact with network controllers for NRP (Network Resource Partition) operations when need_nrp is True
-        2. Slice service selection when need_nrp is False
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-            need_nrp (bool, optional): Flag to indicate if NRP operations are needed. Defaults to False.
-            order (str, optional): Type of NRP operation (READ, UPDATE, CREATE). Defaults to None.
-            nrp (dict, optional): Specific Network Resource Partition to operate on. Defaults to None.
-        """
-        if need_nrp:
-            # Perform NRP-related operations
-            self.__nrp(order, nrp)
-        else:
-            # Select slice service method
-            way = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"]["tag-type"]["value"]
-            way = "L3VPN"
-            return self.__select_way(controller=self.controller_type, way=way, ietf_intent=ietf_intent)
-
-    ### Generic functionalities
-    def __load_template(self, which, dir_t):
-        """
-        Load and process JSON templates for different network slice formats.
-
-        Args:
-            which (int): Template selector (0: 3GPP, 1: IETF, other: Teraflow)
-            dir_t (str): Directory path to the template file
-        """
-        try:
-            # Open and read the template file
-            with open(dir_t, 'r') as source:
-                # Clean up the JSON template
-                template = source.read().replace('\t', '').replace('\n', '').replace("'", '"').strip()
-                
-                # Store template based on selector
-                if which == 0:
-                    self.__gpp_template = template
-                elif which == 1:
-                    self.__ietf_template = template
-                else:
-                    self.__teraflow_template = template
-                
-        except Exception as e:
-            logging.error(f"Template loading error: {e}")
-            return self.__send_response(False, code=500, message=f"Template loading error: {e}")
-
-    def __send_response(self, result, status="error", message=None, code=None):
-        """
-        Generate and send a response to the 3GPP client about the slice request.
-
-        Args:
-            result (bool): Indicates whether the slice request was successful
-            status (str, optional): Response status. Defaults to "error"
-            message (str, optional): Additional error message. Defaults to None
-            code (str, optional): Response code. Defaults to None
-
-        Returns:
-            tuple: A tuple containing the response dictionary and status code
-        """    
-        if result:
-            # Successful slice creation
-            logging.info("Your slice request was fulfilled sucessfully")
-            self.setup_time = (self.end_time - self.start_time)*1000
-            logging.info(f"Setup time: {self.setup_time:.2f}")
-
-            # Construct detailed successful response
-            answer = {
-                "status": "success",
-                "code": code,
-                "slices": [],
-                "setup_time": self.setup_time
-            }
-            # Add slice details to the response
-            for subnet in self.answer:
-                slice_info = {
-                    "id": subnet,
-                    "source": self.answer[subnet]["Source"],
-                    "destination": self.answer[subnet]["Destination"],
-                    "vlan": self.answer[subnet]["VLAN"],
-                    "requirements": self.answer[subnet]["QoS Requirements"],
-                }
-                answer["slices"].append(slice_info)
-            self.cool_answer = answer
-        else:
-            # Failed slice creation
-            logging.info("Your request cannot be fulfilled. Reason: "+message)
-            self.cool_answer = {
-                "status" :status,
-                "code": code,
-                "message": message
-            }
-        return self.cool_answer, code
-
-    def __extract_data(self, intent_json):
-        """
-        Extract source and destination IP addresses from the IETF intent.
-
-        Args:
-            intent_json (dict): IETF-formatted network slice intent
-        """
-        # Extract source and destination IP addresses
-        source = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"]
-        destination = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"]
-
-        logging.info(f"Intent generated between {source} and {destination}") 
-
-        # Store slice and connection details
-        self.subnet = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-        self.subnet = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-        self.answer[self.subnet] = {
-            "Source": source,
-            "Destination": destination
-        }
-    
-    def __store_data(self, intent, slice_id):
-        """
-        Store network slice intent information in a JSON database file.
-
-        This method:
-        1. Creates a JSON file if it doesn't exist
-        2. Reads existing content
-        3. Updates or adds new slice intent information
-
-        Args:
-            intent (dict): Network slice intent to be stored
-            slice_id (str, optional): Existing slice ID to update. Defaults to None.
-        """
-        file_path = os.path.join(SRC_PATH, "slice_ddbb.json")
-        # Create initial JSON file if it doesn't exist
-        if not os.path.exists(file_path):
-            with open(file_path, 'w') as file:
-                json.dump([], file, indent=4)
-
-        # Read existing content
-        with open(file_path, 'r') as file:
-            content = json.load(file)
-    
-        # Update or add new slice intent
-        if slice_id:
-            # Update existing slice intent
-            for slice in content:
-                if slice["slice_id"] == slice_id:
-                    slice["intent"] = intent
-        else:
-            # Add new slice intent
-            content.append(
-                {
-                    "slice_id": intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"],
-                    "intent": intent,
-                    "controller": self.controller_type,
-                })
-        
-        # # Write updated content back to file
-        with open(file_path, 'w') as file:
-            json.dump(content, file, indent=4)
-
-    ### NBI processor functionalities
-    def __detect_format(self,json_data):    
-        """
-        Detect the format of the input network slice intent.
-
-        This method identifies whether the input JSON is in 3GPP or IETF format 
-        by checking for specific keys in the JSON structure.
-
-        Args:
-            json_data (dict): Input network slice intent JSON
-
-        Returns:
-            str or None: 
-                - "IETF" if IETF-specific keys are found
-                - "3GPP" if 3GPP-specific keys are found
-                - None if no recognizable format is detected
-        """
-        # Check for IETF-specific key
-        if "ietf-network-slice-service:network-slice-services" in json_data:
-            return "IETF"
-        # Check for 3GPP-specific keys
-        if any(key in json_data for key in ["NetworkSlice1", "TopSliceSubnet1", "CNSliceSubnet1", "RANSliceSubnet1"]):
-            return "3GPP"
-        
-        return None
-    
-    def __translator(self, gpp_intent, subnet):
-        """
-        Translate a 3GPP network slice intent to IETF format.
-
-        This method converts a 3GPP intent into a standardized IETF intent template, 
-        mapping key parameters such as QoS profiles, service endpoints, and connection details.
-
-        Args:
-            gpp_intent (dict): Original 3GPP network slice intent
-            subnet (str): Specific subnet reference within the 3GPP intent
-
-        Returns:
-            dict: Translated IETF-formatted network slice intent
-        
-        Notes:
-            - Generates a unique slice service ID using UUID
-            - Maps QoS requirements, source/destination endpoints
-            - Logs the translated intent to a JSON file for reference
-        """
-        # Load IETF template and create a copy to modify
-        ietf_i = json.loads(str(self.__ietf_template))
-
-        # Extract endpoint transport objects
-        ep_transport_objects = gpp_intent[subnet]["EpTransport"]
-
-        # Populate template with SLOs (currently supporting QoS profile, latency and bandwidth)
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = gpp_intent[ep_transport_objects[0]]["qosProfile"]
-        
-        profile = gpp_intent.get(subnet, {}).get("SliceProfileList", [{}])[0].get("RANSliceSubnetProfile", {})
-
-    
-        metrics = {
-            ("uLThptPerSliceSubnet", "MaxThpt"): ("one-way-bandwidth", "kbps"),
-            ("uLLatency",): ("one-way-delay-maximum", "milliseconds"),
-            ("EnergyConsumption",): ("energy_consumption", "Joules"),
-            ("EnergyEfficiency",): ("energy_efficiency", "W/bps"),
-            ("CarbonEmissions",): ("carbon_emission", "gCO2eq"),
-            ("RenewableEnergyUsage",): ("renewable_energy_usage", "rate")
-        }
-
-        # Aux
-        def get_nested(d, keys):
-            for k in keys:
-                if isinstance(d, dict) and k in d:
-                    d = d[k]
-                else:
-                    return None
-            return d
-
-        for key_path, (metric_type, metric_unit) in metrics.items():
-            value = get_nested(profile, key_path)
-            if value is not None:
-                ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]\
-                    ["slo-sle-template"][0]["slo-policy"]["metric-bound"].append({
-                    "metric-type": metric_type,
-                    "metric-unit": metric_unit,
-                    "bound": value
-                })
-
-
-        # Generate unique slice service ID and description
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] = f"slice-service-{uuid.uuid4()}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = f"Transport network slice mapped with 3GPP slice {next(iter(gpp_intent))}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["slo-sle-policy"]["slo-sle-template"] = ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
-        
-        # Configure Source SDP
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["node-id"] = ep_transport_objects[0].split(" ", 1)[1]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[0]]["EpApplicationRef"][0]]["localAddress"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceType"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceId"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[0]]["IpAddress"] 
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[0]]["NextHopInfo"] 
-
-        # Configure Destination SDP
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["node-id"] = ep_transport_objects[1].split(" ", 1)[1]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[1]]["EpApplicationRef"][0]]["localAddress"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceType"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceId"]
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[1]]["IpAddress"] 
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[1]]["NextHopInfo"] 
-
-        # Configure Connection Group and match-criteria
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["connection-groups"]["connection-group"][0]["id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
-        ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
-
-        return ietf_i
-    
-    ### Mapper functionalities
-    def __slo_viability(self, slice_slos, nrp_slos):
-        """
-        Compare Service Level Objectives (SLOs) between a slice and a Network Resource Partition (NRP).
-
-        This method assesses whether an NRP can satisfy the SLOs of a network slice.
-
-        Args:
-            slice_slos (list): Service Level Objectives of the slice
-            nrp_slos (dict): Service Level Objectives of the Network Resource Pool
-
-        Returns:
-            tuple: A boolean indicating viability and a flexibility score
-                - First value: True if NRP meets SLOs, False otherwise
-                - Second value: A score representing how well the NRP meets the SLOs
-        """
-        # Define SLO types for maximum and minimum constraints
-        slo_type = {
-            "max": ["one-way-delay-maximum", "two-way-delay-maximum", "one-way-delay-percentile", "two-way-delay-percentile",
-                    "one-way-delay-variation-maximum", "two-way-delay-variation-maximum",
-                    "one-way-delay-variation-percentile", "two-way-delay-variation-percentile",
-                    "one-way-packet-loss", "two-way-packet-loss"],
-            "min": ["one-way-bandwidth", "two-way-bandwidth", "shared-bandwidth"]
-        }
-        flexibility_scores = []
-        for slo in slice_slos:
-            for nrp_slo in nrp_slos['slos']:
-                if slo["metric-type"] == nrp_slo["metric-type"]:
-                    # Handle maximum type SLOs
-                    if slo["metric-type"] in slo_type["max"]:
-                        flexibility = (nrp_slo["bound"] - slo["bound"]) / slo["bound"]
-                        if slo["bound"] > nrp_slo["bound"]:
-                            return False, 0  # Does not meet maximum constraint
-                    # Handle minimum type SLOs
-                    if slo["metric-type"] in slo_type["min"]:
-                        flexibility = (slo["bound"] - nrp_slo["bound"]) / slo["bound"]
-                        if slo["bound"] < nrp_slo["bound"]:
-                            return False, 0  # Does not meet minimum constraint
-                    flexibility_scores.append(flexibility)
-                    break  # Exit inner loop after finding matching metric
-            
-            # Calculate final viability score
-            score = sum(flexibility_scores) / len(flexibility_scores) if flexibility_scores else 0
-        return True, score  # Si pasó todas las verificaciones, la NRP es viable
-    
-    ### Realizer functionalities.
-    def __nrp(self, request, nrp):
-        """
-        Manage Network Resource Partition (NRP) operations.
-
-        This method handles CRUD operations for Network Resource Partitions,
-        interacting with Network Controllers (currently done statically via a JSON-based database file).
-
-        Args:
-            request (str): The type of operation to perform. 
-                Supported values:
-                - "CREATE": Add a new NRP to the database
-                - "READ": Retrieve the current NRP view
-                - "UPDATE": Update an existing NRP (currently a placeholder)
-
-            nrp (dict): The Network Resource Partition details to create or update.
-
-        Returns:
-            None or answer: 
-            - For "CREATE": Returns the response from the controller (currently using a static JSON)
-            - For "READ": Gets the NRP view from the controller (currently using a static JSON)
-            - For "UPDATE": Placeholder for update functionality
-
-        Notes:
-            - Uses a local JSON file "nrp_ddbb.json" to store NRP information as controller operation is not yet defined
-        """
-        if request == "CREATE":
-            # TODO: Implement actual request to Controller to create an NRP
-            logging.debug("Creating NRP")
-
-            # Load existing NRP database
-            with open(os.path.join(SRC_PATH, "nrp_ddbb.json"), "r") as archivo:
-                self.__nrp_view = json.load(archivo)
-
-            # Append new NRP to the view
-            self.__nrp_view.append(nrp)
-
-            # Placeholder for controller POST request
-            answer = None
-            return answer
-        elif request == "READ":
-            # TODO: Request to Controller to get topology and current NRP view
-            logging.debug("Reading Topology")
-
-            # Load NRP database
-            with open(os.path.join(SRC_PATH, "nrp_ddbb.json"), "r") as archivo:
-                self.__nrp_view = json.load(archivo)
-            
-        elif request == "UPDATE":
-            # TODO: Implement request to Controller to update NRP
-            logging.debug("Updating NRP")
-            answer = ""
-    
-    def __select_way(self, controller=None, way=None, ietf_intent=None):
-        """
-        Determine the method of slice realization.
-
-        Args:
-            controller (str): The controller to use for slice realization.
-                Supported values:
-                - "IXIA": IXIA NEII for network testing
-                - "TFS": TeraFlow Service for network slice management
-            way (str): The type of technology to use.
-                Supported values:
-                - "L2VPN": Layer 2 Virtual Private Network
-                - "L3VPN": Layer 3 Virtual Private Network
-
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: A realization request for the specified network slice type.
-
-        """
-        realizing_request = None
-        if controller == "TFS":
-            if way == "L2VPN":
-                realizing_request = self.__tfs_l2vpn(ietf_intent)
-            elif way == "L3VPN":
-                realizing_request = self.__tfs_l3vpn(ietf_intent)
-            else:
-                logging.warning(f"Unsupported way: {way}. Defaulting to L2VPN realization.")
-                realizing_request = self.__tfs_l2vpn(ietf_intent)
-        elif controller == "IXIA":
-            realizing_request = self.__ixia(ietf_intent)
-        else:
-            logging.warning(f"Unsupported controller: {controller}. Defaulting to TFS L2VPN realization.")
-            realizing_request = self.__tfs_l2vpn(ietf_intent)
-        return realizing_request
-
-    def __tfs_l2vpn(self, ietf_intent):
-        """
-        Translate slice intent into a TeraFlow service request.
-
-        This method prepares a L2VPN service request by:
-        1. Defining endpoint routers
-        2. Loading a service template
-        3. Generating a unique service UUID
-        4. Configuring service endpoints
-        5. Adding QoS constraints
-        6. Preparing configuration rules for network interfaces
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: A TeraFlow service request for L2VPN configuration.
-
-        """
-        # Hardcoded router endpoints
-        # TODO (should be dynamically determined)
-        origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-        destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-
-        # Extract QoS Profile from intent
-        QoSProfile = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
-        vlan_value = 0
-
-        self.answer[self.subnet]["QoS Requirements"] = []
-
-        # Populate response with QoS requirements and VLAN from intent
-        slo_policy = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]
-
-        # Process metrics
-        for metric in slo_policy.get("metric-bound", []):
-            constraint_type = f"{metric['metric-type']}[{metric['metric-unit']}]"
-            constraint_value = str(metric["bound"])
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": constraint_type,
-                "constraint_value": constraint_value
-            })
-
-        # Availability
-        if "availability" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "availability[%]",
-                "constraint_value": str(slo_policy["availability"])
-            })
-
-        # MTU
-        if "mtu" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "mtu[bytes]",
-                "constraint_value": str(slo_policy["mtu"])
-            })
-
-        # VLAN
-        vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
-        self.answer[self.subnet]["VLAN"] = vlan_value
-
-        if UPLOAD_TYPE == "WEBUI":
-            # Load L2VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "L2-VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))["services"][0]
-
-            # Generate unique service UUID
-            tfs_request["service_id"]["service_uuid"]["uuid"] += "-" + str(int(datetime.now().timestamp() * 1e7))
-
-            # Configure service endpoints
-            for endpoint in tfs_request["service_endpoint_ids"]:
-                endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
-                endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
-
-            # Add service constraints
-            for constraint in self.answer[self.subnet]["QoS Requirements"]:
-                tfs_request["service_constraints"].append({"custom": constraint})
-
-            # Add configuration rules
-            for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
-                router_id = origin_router_id if i == 1 else destination_router_id
-                router_if = origin_router_if if i == 1 else destination_router_if
-                resource_value = config_rule["custom"]["resource_value"]
-
-                sdp_index = i - 1
-                vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
-                if vlan_value:
-                    resource_value["vlan_id"] = int(vlan_value)
-                resource_value["circuit_id"] = vlan_value
-                resource_value["remote_router"] = destination_router_id if i == 1 else origin_router_id
-                resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
-                config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
-
-        elif UPLOAD_TYPE == "NBI":
-            self.path = NBI_L2_PATH
-            # Load IETF L2VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "ietfL2VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))
-
-            # Generate service UUID
-            full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-            uuid_only = full_id.split("slice-service-")[-1]
-            tfs_request["ietf-l2vpn-svc:vpn-service"][0]["vpn-id"] = uuid_only
-
-            # Configure service endpoints
-            sites = tfs_request["ietf-l2vpn-svc:vpn-service"][0]["site"]
-            sdps = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"]
-
-            for i, site in enumerate(sites):
-                is_origin = (i == 0)
-                router_id = origin_router_id if is_origin else destination_router_id
-                sdp = sdps[0] if is_origin else sdps[1]
-                site["site-id"] = router_id
-                site["site-location"] = sdp["node-id"]
-                site["site-network-access"]["interface"]["ip-address"] = sdp["sdp-ip-address"]
-
-        logging.info(f"L2VPN Intent realized\n")
-        return tfs_request
-    
-    def __tfs_l2vpn_support(self, requests):
-        """
-        Configuration support for L2VPN with path selection based on MPLS traffic-engineering tunnels
-
-        Args:
-            requests (list): A list of configuration parameters.
-
-        """
-        sources={
-            "source": "10.60.125.44",
-            "config":[]
-        }
-        destinations={
-            "destination": "10.60.125.45",
-            "config":[]
-        }
-        for request in requests:
-            # Configure Source Endpoint
-            temp_source = request["service_config"]["config_rules"][1]["custom"]["resource_value"]
-            endpoints = request["service_endpoint_ids"]
-            config = {
-                "ni_name": temp_source["ni_name"],
-                "remote_router": temp_source["remote_router"],
-                "interface": endpoints[0]["endpoint_uuid"]["uuid"].replace("0/0/0-", ""),
-                "vlan" : temp_source["vlan_id"],
-                "number" : temp_source["vlan_id"] % 10 + 1
-            }
-            sources["config"].append(config)
-
-            # Configure Destination Endpoint
-            temp_destiny = request["service_config"]["config_rules"][2]["custom"]["resource_value"]
-            config = {
-                "ni_name": temp_destiny["ni_name"],
-                "remote_router": temp_destiny["remote_router"],
-                "interface": endpoints[1]["endpoint_uuid"]["uuid"].replace("0/0/3-", ""),
-                "vlan" : temp_destiny["vlan_id"],
-                "number" : temp_destiny["vlan_id"] % 10 + 1
-            }
-            destinations["config"].append(config)
-         
-        #cisco_source = cisco_connector(source_address, ni_name, remote_router, vlan, vlan % 10 + 1)
-        cisco_source = cisco_connector(sources["source"], sources["config"])
-        commands = cisco_source.full_create_command_template()
-        cisco_source.execute_commands(commands)
-
-        #cisco_destiny = cisco_connector(destination_address, ni_name, remote_router, vlan, vlan % 10 + 1)
-        cisco_destiny = cisco_connector(destinations["destination"], destinations["config"])
-        commands = cisco_destiny.full_create_command_template()
-        cisco_destiny.execute_commands(commands)
-
-    def __tfs_l2vpn_delete(self):
-        """
-        Delete L2VPN configurations from Cisco devices.
-
-        This method removes L2VPN configurations from Cisco routers
-
-        Notes:
-            - Uses cisco_connector to generate and execute deletion commands
-            - Clears Network Interface (NI) settings
-        """
-        # Delete Source Endpoint Configuration
-        source_address = "10.60.125.44"
-        cisco_source = cisco_connector(source_address)
-        cisco_source.execute_commands(cisco_source.create_command_template_delete())
-
-        # Delete Destination Endpoint Configuration
-        destination_address = "10.60.125.45"
-        cisco_destiny = cisco_connector(destination_address)
-        cisco_destiny.execute_commands(cisco_destiny.create_command_template_delete())
-    
-    def __tfs_l3vpn(self, ietf_intent):
-        """
-        Translate L3VPN (Layer 3 Virtual Private Network) intent into a TeraFlow service request.
-
-        Similar to __tfs_l2vpn, but configured for Layer 3 VPN:
-        1. Defines endpoint routers
-        2. Loads service template
-        3. Generates unique service UUID
-        4. Configures service endpoints
-        5. Adds QoS constraints
-        6. Prepares configuration rules for network interfaces
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: A TeraFlow service request for L3VPN configuration.
-        """
-        # Hardcoded router endpoints
-        # TODO (should be dynamically determined)
-        origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-        destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
-        destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-
-        # Extract QoS Profile from intent
-        QoSProfile = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
-        vlan_value = 0
-
-        self.answer[self.subnet]["QoS Requirements"] = []
-
-        # Populate response with QoS requirements and VLAN from intent
-        slo_policy = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]
-
-        # Process metrics
-        for metric in slo_policy.get("metric-bound", []):
-            constraint_type = f"{metric['metric-type']}[{metric['metric-unit']}]"
-            constraint_value = str(metric["bound"])
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": constraint_type,
-                "constraint_value": constraint_value
-            })
-
-        # Availability
-        if "availability" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "availability[%]",
-                "constraint_value": str(slo_policy["availability"])
-            })
-
-        # MTU
-        if "mtu" in slo_policy:
-            self.answer[self.subnet]["QoS Requirements"].append({
-                "constraint_type": "mtu[bytes]",
-                "constraint_value": str(slo_policy["mtu"])
-            })
-
-        # VLAN
-        vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
-        self.answer[self.subnet]["VLAN"] = vlan_value
-
-        if UPLOAD_TYPE == "WEBUI":
-            # Load L3VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "L3-VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))["services"][0]
-            
-            # Generate unique service UUID
-            tfs_request["service_id"]["service_uuid"]["uuid"] += "-" + str(int(datetime.now().timestamp() * 1e7))
-
-            # Configure service endpoints
-            for endpoint in tfs_request["service_endpoint_ids"]:
-                endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
-                endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
-
-            # Add service constraints
-            for constraint in self.answer[self.subnet]["QoS Requirements"]:
-                tfs_request["service_constraints"].append({"custom": constraint})
-
-            # Add configuration rules
-            for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
-                router_id = origin_router_id if i == 1 else destination_router_id
-                router_if = origin_router_if if i == 1 else destination_router_if
-                resource_value = config_rule["custom"]["resource_value"]
-
-                sdp_index = i - 1
-                vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
-                resource_value["router_id"] = destination_router_id if i == 1 else origin_router_id
-                resource_value["vlan_id"] = int(vlan_value)
-                resource_value["address_ip"] = destination_router_id if i == 1 else origin_router_id
-                resource_value["policy_AZ"] = "policyA"
-                resource_value["policy_ZA"] = "policyB"
-                resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
-                config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
-        
-        elif UPLOAD_TYPE == "NBI":
-            self.path = NBI_L3_PATH
-            # Load IETF L3VPN service template
-            self.__load_template(2, os.path.join(TEMPLATES_PATH, "ietfL3VPN_template_empty.json"))
-            tfs_request = json.loads(str(self.__teraflow_template))
-
-            # Generate service UUID
-            full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
-            tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["vpn-services"]["vpn-service"][0]["vpn-id"] = full_id
-            # Configure service endpoints
-            for i, site in enumerate(tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["sites"]["site"]):
-
-                # Determine if origin or destination
-                is_origin = (i == 0)
-                sdp_index = 0 if is_origin else 1
-                location = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["node-id"]
-                router_id = origin_router_id if is_origin else destination_router_id
-                router_if = origin_router_if if is_origin else destination_router_if
-
-                # Assign common values
-                site["site-id"] = f"site_{location}"
-                site["locations"]["location"][0]["location-id"] = location
-                site["devices"]["device"][0]["device-id"] = router_id
-                site["devices"]["device"][0]["location"] = location
-
-                access = site["site-network-accesses"]["site-network-access"][0]
-                access["site-network-access-id"] = router_if
-                access["device-reference"] = router_id
-                access["vpn-attachment"]["vpn-id"] = full_id
-
-                # Aplicar restricciones QoS
-                for constraint in self.answer[self.subnet]["QoS Requirements"]:
-                    ctype = constraint["constraint_type"]
-                    cvalue = float(constraint["constraint_value"])
-                    if constraint["constraint_type"].startswith("one-way-bandwidth"):
-                            unit = constraint["constraint_type"].split("[")[-1].rstrip("]")
-                            multiplier = {"bps": 1, "kbps": 1_000, "Mbps": 1_000_000, "Gbps": 1_000_000_000}.get(unit, 1)
-                            value = int(cvalue * multiplier)
-                            access["service"]["svc-input-bandwidth"] = value
-                            access["service"]["svc-output-bandwidth"] = value
-                    elif ctype == "one-way-delay-maximum[milliseconds]":
-                        access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["latency"]["latency-boundary"] = int(cvalue)
-                    elif ctype == "availability[%]":
-                        access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["bandwidth"]["guaranteed-bw-percent"] = int(cvalue)
-                    elif ctype == "mtu[bytes]":
-                        access["service"]["svc-mtu"] = int(cvalue)
-
-        
-        logging.info(f"L3VPN Intent realized\n")
-        self.answer[self.subnet]["VLAN"] = vlan_value
-        return tfs_request
-
-    def __ixia(self, ietf_intent):
-        """
-        Prepare an Ixia service request based on the IETF intent.
-
-        This method configures an Ixia service request by:
-        1. Defining endpoint routers
-        2. Loading a service template
-        3. Generating a unique service UUID
-        4. Configuring service endpoints
-        5. Adding QoS constraints
-
-        Args:
-            ietf_intent (dict): IETF-formatted network slice intent.
-
-        Returns:
-            dict: An Ixia service request for configuration.
-        """
-        self.answer[self.subnet]["QoS Requirements"] = []
-                # Add service constraints
-        for i, constraint in enumerate(ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]):
-            bound = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["bound"]
-            metric_type = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["metric-type"]
-            metric_unit = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["metric-unit"]
-            service_constraint ={
-                "custom": {
-                    "constraint_type": f"{metric_type}[{metric_unit}]",
-                    "constraint_value": f"{bound}"
-                }
-            }
-            self.answer[self.subnet]["QoS Requirements"].append(service_constraint["custom"])
-        self.answer[self.subnet]["VLAN"] = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
-        # Extraer la lista de métricas de forma segura
-        metric_bounds = ietf_intent.get("ietf-network-slice-service:network-slice-services", {}) \
-            .get("slo-sle-templates", {}) \
-            .get("slo-sle-template", [{}])[0] \
-            .get("slo-policy", {}) \
-            .get("metric-bound", [])
-
-        # Inicializar valores
-        bandwidth = None
-        latency = None
-        tolerance = None
-
-        # Asignar valores según el tipo de métrica
-        for metric in metric_bounds:
-            metric_type = metric.get("metric-type")
-            bound = metric.get("bound")
-
-            if metric_type == "one-way-bandwidth":
-                bandwidth = bound
-            elif metric_type == "one-way-delay-maximum":
-                latency = bound
-            elif metric_type == "one-way-delay-variation-maximum": 
-                tolerance = bound
-
-        # Construcción del diccionario intent
-        intent = {
-            "src_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slice-service", [{}])[0]
-                .get("sdps", {}).get("sdp", [{}])[0]
-                .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
-                .get("sdp-peering", {}).get("peer-sap-id"),
-
-            "dst_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slice-service", [{}])[0]
-                .get("sdps", {}).get("sdp", [{}, {}])[1]
-                .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
-                .get("sdp-peering", {}).get("peer-sap-id"),
-
-            "vlan_id": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slice-service", [{}])[0]
-                .get("sdps", {}).get("sdp", [{}])[0]
-                .get("service-match-criteria", {}).get("match-criterion", [{}])[0]
-                .get("value"),
-
-            "bandwidth": bandwidth,
-            "latency": latency,
-            "tolerance": tolerance,
-
-            "latency_version": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
-                .get("description"),
-
-            "reliability": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
-                .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
-                .get("sle-policy", {}).get("reliability"),
-        }
-
-        logging.info(f"IXIA Intent realized\n")
-        return intent
-    
diff --git a/src/planner/energy_planner/energy.py b/src/planner/energy_planner/energy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6d33c835e51044d4f3ec29159a3349a9e549fc0
--- /dev/null
+++ b/src/planner/energy_planner/energy.py
@@ -0,0 +1,393 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, random, os, json, heapq  
+from src.config.constants import SRC_PATH
+from flask import current_app
+from src.utils.safe_get import safe_get
+
+
+def energy_planner(intent):
+    """
+    Plan an optimal network path based on energy consumption metrics.
+
+    This function calculates the most energy-efficient path between source
+    and destination nodes, considering energy consumption, carbon emissions,
+    energy efficiency, and renewable energy usage constraints.
+
+    Args:
+        intent (dict): Network slice intent containing service delivery points
+                      and energy-related SLO constraints
+
+    Returns:
+        list or None: Ordered list of node names representing the optimal path,
+                     or None if no valid path is found or topology is not recognized
+
+    Notes:
+        - Only supports topology with nodes A through G
+        - Can use external PCE or internal Dijkstra-based algorithm
+        - Considers DLOS (Delay and Loss Objectives) for energy metrics:
+          EC (Energy Consumption), CE (Carbon Emission), 
+          EE (Energy Efficiency), URE (Renewable Energy Usage)
+
+    Raises:
+        Exception: For errors in energy metrics or topology retrieval
+    """    
+    energy_metrics = retrieve_energy()
+    topology = retrieve_topology()
+    source = safe_get(intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 0, "node-id"])
+    destination = safe_get(intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 1, "node-id"])
+    optimal_path = []
+    allowed_ids = {"A", "B", "C", "D", "E", "F", "G"}
+
+    if source not in allowed_ids or destination not in allowed_ids:
+        logging.warning(f"Topology not recognized (source: {source}, destination: {destination}). Skipping energy-based planning.")
+        return None
+    
+    # If using an external PCE
+    if current_app.config["PCE_EXTERNAL"]:
+        logging.debug("Using external PCE for path planning")    
+        def build_slice_input(node_source, node_destination):
+            """Build input format for external PCE slice computation."""
+            return {
+                "clientName": "demo-client",
+                "requestId": random.randint(1000, 9999),
+                "sites": [node_source["nodeId"], node_destination["nodeId"]],
+                "graph": {
+                    "nodes": [
+                        {
+                            "nodeId": node_source["nodeId"],
+                            "name": node_source["name"],
+                            "footprint": node_source["footprint"],
+                            "sticky": [node_source["nodeId"]]
+                        },
+                        {
+                            "nodeId": node_destination["nodeId"],
+                            "name": node_destination["name"],
+                            "footprint": node_destination["footprint"],
+                            "sticky": [node_destination["nodeId"]]
+                        }
+                    ],
+                    "links": [
+                        {
+                            "fromNodeId": node_source["nodeId"],
+                            "toNodeId": node_destination["nodeId"],
+                            "bandwidth": 1000000000,
+                            "metrics": [
+                                {
+                                    "metric": "DELAY",
+                                    "value": 10,
+                                    "bound": True,
+                                    "required": True
+                                }
+                            ]
+                        }
+                    ],
+                    "constraints": {
+                        "maxVulnerability": 3,
+                        "maxDeployedServices": 10,
+                        "metricLimits": []
+                    }
+                }
+            }
+        
+        source = next((node for node in topology["nodes"] if node["name"] == source), None)
+        destination = next((node for node in topology["nodes"] if node["name"] == destination), None)
+        slice_input = build_slice_input(source, destination)
+
+        def simulate_slice_output(input_data):
+            """
+            Simulate external PCE response for slice computation.
+            
+            Args:
+                input_data (dict): Input data for slice computation
+                
+            Returns:
+                dict: Simulated slice output with path information
+            """
+            return {
+                "input": input_data,
+                "slice": {
+                    "nodes": [
+                        {"site": 1, "service": 1},
+                        {"site": 2, "service": 2}
+                    ],
+                    "links": [
+                        {
+                            "fromNodeId": 1,
+                            "toNodeId": 2,
+                            "lspId": 500,
+                            "path": {
+                                "ingressNodeId": 1,
+                                "egressNodeId": 2,
+                                "hops": [
+                                    {"nodeId": 3, "linkId": "A-C", "portId": 1},
+                                    {"nodeId": 2, "linkId": "C-B", "portId": 2}
+                                ]
+                            }
+                        }
+                    ],
+                    "metric": {"value": 9}
+                },
+                "error": None
+            }
+        
+        slice_output = simulate_slice_output(slice_input)
+        # Build optimal path from PCE response
+        optimal_path.append(source["name"])
+        for link in slice_output["slice"]["links"]:
+            for hop in link["path"]["hops"]:
+                optimal_path.append(next((node for node in topology["nodes"] if node["nodeId"] == hop['nodeId']), None)["name"])
+    
+    else:
+        logging.debug("Using internal PCE for path planning")
+        ietf_dlos = intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
+        logging.debug(ietf_dlos)
+        
+        # Extract DLOS (Delay and Loss Objectives) constraints
+        dlos = {
+            "EC": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_consumption"), None),
+            "CE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "carbon_emission"), None),
+            "EE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_efficiency"), None),
+            "URE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "renewable_energy_usage"), None)
+        }
+        logging.debug(f"Planning optimal path from {source} to {destination} with DLOS: {dlos}")
+        optimal_path = calculate_optimal_path(topology, energy_metrics, source, destination, dlos)
+
+    if not optimal_path:
+        logging.error("No valid energy path found")
+        return None
+
+    return optimal_path
+
+
+def retrieve_energy():
+    """
+    Retrieve energy consumption data for network nodes.
+    
+    Returns:
+        dict: Energy metrics including power consumption, carbon emissions,
+              efficiency, and renewable energy usage for each node
+              
+    Notes:
+        TODO: Implement logic to retrieve real-time data from controller
+        Currently reads from static JSON file
+    """
+    with open(os.path.join(SRC_PATH, "planner/energy_planner/energy_ddbb.json"), "r") as archivo:
+        energy_metrics = json.load(archivo)
+    return energy_metrics
+
+
+def retrieve_topology():
+    """
+    Retrieve network topology information.
+    
+    Returns:
+        dict: Network topology with nodes and links
+        
+    Notes:
+        - If PCE_EXTERNAL is True, retrieves topology for external PCE format
+        - Otherwise retrieves topology in internal format
+        TODO: Implement logic to retrieve real-time data from controller
+        Currently reads from static JSON files
+    """
+    if current_app.config["PCE_EXTERNAL"]:
+        # TODO: Implement the logic to retrieve topology data from external PCE
+        # GET /sss/v1/topology/node and /sss/v1/topology/link
+        with open(os.path.join(SRC_PATH, "planner/energy_planner/ext_topo_ddbb.json"), "r") as archivo:
+            topology = json.load(archivo)
+    else:
+        # TODO: Implement the logic to retrieve topology data from controller
+        with open(os.path.join(SRC_PATH, "planner/energy_planner/topo_ddbb.json"), "r") as archivo:
+            topology = json.load(archivo)
+    return topology
+
+
+def calculate_optimal_path(topology, energy_metrics, source, destination, dlos):
+    """
+    Calculate the optimal path using Dijkstra's algorithm with energy constraints.
+    
+    This function implements a constrained shortest path algorithm that considers
+    energy consumption, carbon emissions, energy efficiency, and renewable energy
+    usage as optimization criteria.
+    
+    Args:
+        topology (dict): Network topology with nodes and links
+        energy_metrics (dict): Energy consumption data for each node
+        source (str): Source node identifier
+        destination (str): Destination node identifier
+        dlos (dict): Constraint bounds for:
+                    - EC: Energy Consumption limit
+                    - CE: Carbon Emission limit
+                    - EE: Energy Efficiency limit
+                    - URE: Minimum Renewable Energy Usage
+    
+    Returns:
+        list: Ordered list of node names forming the optimal path,
+              or empty list if no valid path exists
+              
+    Notes:
+        - Uses modified Dijkstra's algorithm with multiple constraints
+        - Paths violating any DLOS constraint are discarded
+        - Node weights computed using compute_node_weight function
+    """
+    logging.debug("Starting optimal path calculation...")
+    
+    # Create a dictionary with the weights of each node
+    node_data_map = {}
+    for node_data in energy_metrics:
+        node_id = node_data["name"]
+        ec = node_data["typical-power"]
+        ce = node_data["carbon-emissions"]
+        ee = node_data["efficiency"]
+        ure = node_data["renewable-energy-usage"]
+
+        total_power_supply = sum(ps["typical-power"] for ps in node_data["power-supply"])
+        total_power_boards = sum(b["typical-power"] for b in node_data["boards"])
+        total_power_components = sum(c["typical-power"] for c in node_data["components"])
+        total_power_transceivers = sum(t["typical-power"] for t in node_data["transceivers"])
+
+        logging.debug(f"Node {node_id}: EC={ec}, CE={ce}, EE={ee}, URE={ure}")
+        logging.debug(f"Node {node_id}: PS={total_power_supply}, BO={total_power_boards}, CO={total_power_components}, TR={total_power_transceivers}")
+
+        weight = compute_node_weight(ec, ce, ee, ure,
+                                            total_power_supply,
+                                            total_power_boards,
+                                            total_power_components,
+                                            total_power_transceivers)
+        logging.debug(f"Weight for node {node_id}: {weight}")
+        
+        node_data_map[node_id] = {
+            "weight": weight,
+            "ec": ec,
+            "ce": ce,
+            "ee": ee,
+            "ure": ure
+        }
+
+    # Create a graph representation of the topology
+    graph = {}
+    for node in topology["ietf-network:networks"]["network"][0]["node"]:
+        graph[node["node-id"]] = []
+    for link in topology["ietf-network:networks"]["network"][0]["link"]:
+        src = link["source"]["source-node"]
+        dst = link["destination"]["dest-node"]
+        graph[src].append((dst, node_data_map[dst]["weight"]))
+        logging.debug(f"Added link: {src} -> {dst} with weight {node_data_map[dst]['weight']}")
+
+    # Dijkstra's algorithm with restrictions
+    # Queue: (accumulated cost, current node, path, sum_ec, sum_ce, sum_ee, min_ure)
+    queue = [(0, source, [], 0, 0, 0, 1)]
+    visited = set()
+
+    logging.debug(f"Starting search from {source} to {destination} with restrictions: {dlos}")
+
+    while queue:
+        cost, node, path, sum_ec, sum_ce, sum_ee, min_ure = heapq.heappop(queue)
+        logging.debug(f"Exploring node {node} with cost {cost} and path {path + [node]}")
+        
+        if node in visited:
+            logging.debug(f"Node {node} already visited, skipped.")
+            continue
+        visited.add(node)
+        path = path + [node]
+
+        node_metrics = node_data_map[node]
+        sum_ec += node_metrics["ec"]
+        sum_ce += node_metrics["ce"]
+        sum_ee += node_metrics["ee"]
+        min_ure = min(min_ure, node_metrics["ure"]) if path[:-1] else node_metrics["ure"]
+
+        logging.debug(f"Accumulated -> EC: {sum_ec}, CE: {sum_ce}, EE: {sum_ee}, URE min: {min_ure}")
+
+        # Check constraint violations
+        if dlos["EC"] is not None and sum_ec > dlos["EC"]:
+            logging.debug(f"Discarded path {path} for exceeding EC ({sum_ec} > {dlos['EC']})")
+            continue
+        if dlos["CE"] is not None and sum_ce > dlos["CE"]:
+            logging.debug(f"Discarded path {path} for exceeding CE ({sum_ce} > {dlos['CE']})")
+            continue
+        if dlos["EE"] is not None and sum_ee > dlos["EE"]:
+            logging.debug(f"Discarded path {path} for exceeding EE ({sum_ee} > {dlos['EE']})")
+            continue
+        if dlos["URE"] is not None and min_ure < dlos["URE"]:
+            logging.debug(f"Discarded path {path} for not reaching minimum URE ({min_ure} < {dlos['URE']})")
+            continue
+
+        if node == destination:
+            logging.debug(f"Destination {destination} reached with a valid path: {path}")
+            return path
+
+        for neighbor, weight in graph.get(node, []):
+            if neighbor not in visited:
+                logging.debug(f"Queue -> neighbour: {neighbor}, weight: {weight}")
+                heapq.heappush(queue, (
+                    cost + weight,
+                    neighbor,
+                    path,
+                    sum_ec,
+                    sum_ce,
+                    sum_ee,
+                    min_ure
+                ))
+    
+    logging.debug("No valid path found that meets the restrictions.")
+    return []
+
+
+def compute_node_weight(ec, ce, ee, ure, total_power_supply, total_power_boards, 
+                       total_power_components, total_power_transceivers, 
+                       alpha=1, beta=1, gamma=1, delta=1):
+    """
+    Calculate node weight based on energy and environmental metrics.
+    
+    Computes a green index that represents the environmental impact of routing
+    traffic through a node, considering power consumption and carbon emissions.
+    
+    Args:
+        ec (float): Base energy consumption of the node
+        ce (float): Carbon emissions factor
+        ee (float): Energy efficiency metric
+        ure (float): Renewable energy usage ratio (0-1)
+        total_power_supply (float): Total power from supply units
+        total_power_boards (float): Total power consumed by boards
+        total_power_components (float): Total power consumed by components
+        total_power_transceivers (float): Total power consumed by transceivers
+        alpha (float, optional): Weight for energy consumption. Defaults to 1
+        beta (float, optional): Weight for carbon emissions. Defaults to 1
+        gamma (float, optional): Weight for energy efficiency. Defaults to 1
+        delta (float, optional): Weight for renewable energy. Defaults to 1
+    
+    Returns:
+        float: Computed green index representing environmental impact
+        
+    Notes:
+        Formula: green_index = (power_idle + power_traffic) * time / 1000 * (1 - ure) * ce
+        - Assumes 100 units of traffic
+        - Measured over 1 hour time period
+    """
+    traffic = 100
+    # Measure one hour of traffic
+    time = 1
+
+    power_idle = ec + total_power_supply + total_power_boards + total_power_components + total_power_transceivers
+    power_traffic = traffic * ee
+
+    power_total = (power_idle + power_traffic)
+
+    green_index = power_total * time / 1000 * (1 - ure) * ce
+
+    return green_index
\ No newline at end of file
diff --git a/src/planner/energy_ddbb.json b/src/planner/energy_planner/energy_ddbb.json
similarity index 100%
rename from src/planner/energy_ddbb.json
rename to src/planner/energy_planner/energy_ddbb.json
diff --git a/src/planner/ext_topo_ddbb.json b/src/planner/energy_planner/ext_topo_ddbb.json
similarity index 100%
rename from src/planner/ext_topo_ddbb.json
rename to src/planner/energy_planner/ext_topo_ddbb.json
diff --git a/src/planner/topo_ddbb.json b/src/planner/energy_planner/topo_ddbb.json
similarity index 100%
rename from src/planner/topo_ddbb.json
rename to src/planner/energy_planner/topo_ddbb.json
diff --git a/src/planner/hrat_planner/hrat.py b/src/planner/hrat_planner/hrat.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b370d7430016c0e4c2effff8de6bbbbd87f8bd2
--- /dev/null
+++ b/src/planner/hrat_planner/hrat.py
@@ -0,0 +1,85 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, requests
+
+def hrat_planner(data: str, ip: str, action: str = "create") -> dict:
+    """
+    Interface with the HRAT (Hierarchical Resource Allocation Tool) for transport network slice management.
+    
+    This function communicates with an external HRAT service to create or delete
+    transport network slices, handling optical layer provisioning and IP layer
+    configuration.
+    
+    Args:
+        data (str or dict): Network slice UUID for deletion, or full intent data for creation
+        ip (str): IP address of the HRAT service
+        action (str, optional): Operation to perform - "create" or "delete". Defaults to "create"
+    
+    Returns:
+        dict: Response from HRAT service containing:
+            - network-slice-uuid: Unique identifier for the slice
+            - viability: Boolean indicating if slice is viable
+            - actions: List of configuration actions including:
+                * CREATE_OPTICAL_SLICE
+                * PROVISION_MEDIA_CHANNEL_OLS_PATH
+                * ACTIVATE_TRANSCEIVER
+                * CONFIG_VPNL3
+              
+    Notes:
+        - On timeout or connection errors, returns static fallback data
+        - HRAT service expected at port 9090
+        - Timeout set to 1 second for all requests
+        
+    Exceptions:
+        requests.exceptions.RequestException is caught internally and logged; it is never propagated to the caller
+    """
+    data_static = {'network-slice-uuid': 'ecoc25-short-path-a7764e55-9bdb-4e38-9386-02ff47a33225', 'viability': True, 'actions': [{'type': 'CREATE_OPTICAL_SLICE', 'layer': 'OPTICAL', 'content': {'tenant-uuid': 'ea4ade23-1444-4f93-aabc-4fcbe2ae74dd', 'service-interface-point': [{'uuid': 'e7444187-119b-5b2e-8a60-ee26b30c441a'}, {'uuid': 'b32b1623-1f64-59d2-8148-b035a8f77625'}], 'node': [{'uuid': '68eb48ac-b686-5653-bdaf-7ccaeecd0709', 'owned-node-edge-point': [{'uuid': '7fd74b80-2b5a-55e2-8ef7-82bf589c9591', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}, {'uuid': '7b9f0b65-2387-5352-bc36-7173639463f0', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}]}, {'uuid': 'f55351ce-a5c8-50a7-b506-95b40e08bce4', 'owned-node-edge-point': [{'uuid': 'da6d924d-9cb4-5add-817d-f83e910beb2e', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}, {'uuid': '577ec899-ad92-5a19-a140-405a3cdbaa17', 'media-channel-node-edge-point-spec': {'mc-pool': {'supportable-spectrum': [{'lower-frequency': '191325000', 'upper-frequency': '192225000'}, {'lower-frequency': '194325000', 'upper-frequency': '195225000'}]}}}]}], 'link': [{'uuid': '3beef785-bb26-5741-af10-c5e1838c1701'}, {'uuid': '6144c664-246a-58ed-bf0a-7ec4286625da'}]}, 'controller-uuid': 'TAPI Optical Controller'}, {'type': 'PROVISION_MEDIA_CHANNEL_OLS_PATH', 'layer': 'OPTICAL', 'content': {'ols-path-uuid': 'cfeae4cb-c305-4884-9945-8b0c0f040c98', 'src-sip-uuid': 'e7444187-119b-5b2e-8a60-ee26b30c441a', 'dest-sip-uuid': 'b32b1623-1f64-59d2-8148-b035a8f77625', 
'direction': 'BIDIRECTIONAL', 'layer-protocol-name': 'PHOTONIC_MEDIA', 'layer-protocol-qualifier': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_MC', 'bandwidth-ghz': 100, 'link-uuid-path': ['3beef785-bb26-5741-af10-c5e1838c1701'], 'lower-frequency-mhz': '194700000', 'upper-frequency-mhz': '194800000', 'adjustment-granularity': 'G_6_25GHZ', 'grid-type': 'FLEX'}, 'controller-uuid': 'TAPI Optical Controller', 'tenant-uuid': 'ea4ade23-1444-4f93-aabc-4fcbe2ae74dd'}, {'type': 'ACTIVATE_TRANSCEIVER', 'layer': 'OPTICAL', 'content': {'node-uuid': 'Phoenix-1', 'termination-point-uuid': 'Ethernet110', 'transceiver-type': 'CFP2', 'frequency-ghz': 194700.0, 'spectrum-width-ghz': 100.0, 'tx-power-dbm': 0.0}, 'controller-uuid': 'IP Controller'}, {'type': 'ACTIVATE_TRANSCEIVER', 'layer': 'OPTICAL', 'content': {'node-uuid': 'Phoenix-2', 'termination-point-uuid': 'Ethernet220', 'transceiver-type': 'CFP2', 'frequency-ghz': 194700.0, 'spectrum-width-ghz': 100.0, 'tx-power-dbm': 0.0}, 'controller-uuid': 'IP Controller'}, {'type': 'CONFIG_VPNL3', 'layer': 'IP', 'content': {'tunnel-uuid': '9aae851a-eea9-4a28-969f-0e2c2196e936', 'src-node-uuid': 'Phoenix-1', 'src-ip-address': '10.10.1.1', 'src-ip-mask': '/24', 'src-vlan-id': 100, 'dest-node-uuid': 'Phoenix-2', 'dest-ip-address': '10.10.2.1', 'dest-ip-mask': '/24', 'dest-vlan-id': 100}, 'controller-uuid': 'IP Controller'}]}
+    url = f'http://{ip}:9090/api/resource-allocation/transport-network-slice-l3'
+    headers = {'Content-Type': 'application/json'}
+
+    try:
+        if action == "delete":
+            # Build deletion payload with slice ID
+            payload = {
+                "ietf-network-slice-service:network-slice-services": {
+                    "slice-service": [
+                        {
+                            "id": data
+                        }
+                    ]
+                }
+            }
+            response = requests.delete(url, headers=headers, json=payload, timeout=1)
+        elif action == "create":
+            response = requests.post(url, headers=headers, json=data, timeout=1)
+        else:
+            logging.error("Invalid action. Use 'create' or 'delete'.")
+            return data_static
+        
+        if response.ok:
+            return response.json()
+        else:
+            logging.error(f"Request failed with status code {response.status_code}: {response.text}")
+            return data_static
+
+    except requests.exceptions.RequestException as e:
+        logging.error(f"HTTP request failed: {e}. Returning default data")
+        return data_static
+    except Exception as e:
+        logging.error(f"Unexpected error: {e}")
+        return data_static
+
diff --git a/src/planner/planner.py b/src/planner/planner.py
index b5fb1ba1ee624fcc090d4c85dfc252f2463ac042..979783463ffc29e21e6edd2a7fe88864537081cd 100644
--- a/src/planner/planner.py
+++ b/src/planner/planner.py
@@ -14,273 +14,40 @@
 
 # This file is an original contribution from Telefonica Innovación Digital S.L.
 
-import logging, random, os, json, heapq  
-from src.Constants import SRC_PATH, PCE_EXTERNAL, DEFAULT_LOGGING_LEVEL
+import logging
+from src.planner.energy_planner.energy           import energy_planner
+from src.planner.hrat_planner.hrat               import hrat_planner
+from src.planner.tfs_optical_planner.tfs_optical import tfs_optical_planner
+from flask import current_app
 
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
-    level=DEFAULT_LOGGING_LEVEL,
-    format='%(levelname)s - %(message)s')
 
 class Planner:
+    """
+    Planner class to compute optimal paths for network slices.
+    Uses different strategies based on configuration.
+    """
     """
     Planner class to compute the optimal path for a network slice based on energy consumption and topology.
     """
 
-    def planner(self, intent):
+    def planner(self, intent, type):
         """
         Plan the optimal path for a network slice based on energy consumption and topology.
-        """
-        energy_metrics = self.__retrieve_energy()
-        topology = self.__retrieve_topology()
-        source = intent.get("ietf-network-slice-service:network-slice-services", {}).get("slice-service", [])[0].get("sdps", {}).get("sdp", [])[0].get("id") or "A"
-        destination = intent.get("ietf-network-slice-service:network-slice-services", {}).get("slice-service", [])[0].get("sdps", {}).get("sdp", [])[1].get("id") or "B"
-        optimal_path = []
-        # If using an external PCE
-        if PCE_EXTERNAL:
-            logging.info("Using external PCE for path planning")    
-            def build_slice_input(node_source, node_destination):
-                return {
-                    "clientName": "demo-client",
-                    "requestId": random.randint(1000, 9999),
-                    "sites": [node_source["nodeId"], node_destination["nodeId"]],
-                    "graph": {
-                        "nodes": [
-                            {
-                                "nodeId": node_source["nodeId"],
-                                "name": node_source["name"],
-                                "footprint": node_source["footprint"],
-                                "sticky": [node_source["nodeId"]]
-                            },
-                            {
-                                "nodeId": node_destination["nodeId"],
-                                "name": node_destination["name"],
-                                "footprint": node_destination["footprint"],
-                                "sticky": [node_destination["nodeId"]]
-                            }
-                        ],
-                        "links": [
-                            {
-                                "fromNodeId": node_source["nodeId"],
-                                "toNodeId": node_destination["nodeId"],
-                                "bandwidth": 1000000000,
-                                "metrics": [
-                                    {
-                                        "metric": "DELAY",
-                                        "value": 10,
-                                        "bound": True,
-                                        "required": True
-                                    }
-                                ]
-                            }
-                        ],
-                        "constraints": {
-                            "maxVulnerability": 3,
-                            "maxDeployedServices": 10,
-                            "metricLimits": []
-                        }
-                    }
-                }
-            source = next((node for node in topology["nodes"] if node["name"] == source), None)
-            destination = next((node for node in topology["nodes"] if node["name"] == destination), None)
-            slice_input = build_slice_input(source, destination)
-
-            # POST /sss/v1/slice/compute
-            def simulate_slice_output(input_data):
-                return {
-                    "input": input_data,
-                    "slice": {
-                        "nodes": [
-                            {"site": 1, "service": 1},
-                            {"site": 2, "service": 2}
-                        ],
-                        "links": [
-                            {
-                                "fromNodeId": 1,
-                                "toNodeId": 2,
-                                "lspId": 500,
-                                "path": {
-                                    "ingressNodeId": 1,
-                                    "egressNodeId": 2,
-                                    "hops": [
-                                        {"nodeId": 3, "linkId": "A-C", "portId": 1},
-                                        {"nodeId": 2, "linkId": "C-B", "portId": 2}
-                                    ]
-                                }
-                            }
-                        ],
-                        "metric": {"value": 9}
-                    },
-                    "error": None
-                }
-            slice_output = simulate_slice_output(slice_input)
-            # Mostrar resultado
-            optimal_path.append(source["name"])
-            for link in slice_output["slice"]["links"]:
-                for hop in link["path"]["hops"]:
-                    optimal_path.append(next((node for node in topology["nodes"] if node["nodeId"] == hop['nodeId']), None)["name"])
-        
-        else:
-            logging.info("Using internal PCE for path planning")
-            ietf_dlos = intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
-            logging.info(ietf_dlos),
-            # Solo asigna los DLOS que existan, el resto a None
-            dlos = {
-                "EC": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_consumption"), None),
-                "CE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "carbon_emission"), None),
-                "EE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_efficiency"), None),
-                "URE": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "renewable_energy_usage"), None)
-            }
-            logging.debug(f"Planning optimal path from {source} to {destination} with DLOS: {dlos}")
-            optimal_path = self.__calculate_optimal_path(topology, energy_metrics, source, destination, dlos)
-
-        if not optimal_path:
-            logging.error("No valid path found")
-            raise Exception("No valid energy path found")
-
-        return optimal_path
-
-    def __retrieve_energy(self):
-        # TODO : Implement the logic to retrieve energy consumption data from controller
-        # Taking it from static file
-        with open(os.path.join(SRC_PATH, "planner/energy_ddbb.json"), "r") as archivo:
-            energy_metrics = json.load(archivo)
-        return energy_metrics
-
-    def __retrieve_topology(self):
-        if PCE_EXTERNAL:
-            # TODO : Implement the logic to retrieve topology data from external PCE
-            # GET /sss/v1/topology/node and /sss/v1/topology/link
-            with open(os.path.join(SRC_PATH, "planner/ext_topo_ddbb.json"), "r") as archivo:
-                topology = json.load(archivo)
-        else:
-            # TODO : Implement the logic to retrieve topology data from controller
-            # Taking it from static file
-            with open(os.path.join(SRC_PATH, "planner/topo_ddbb.json"), "r") as archivo:
-                topology = json.load(archivo)
-        return topology
-
-
-
-    def __calculate_optimal_path(self, topology, energy_metrics, source, destination, dlos):
-        logging.debug("Starting optimal path calculation...")
-        
-        # Create a dictionary with the weights of each node
-        node_data_map = {}
-        for node_data in energy_metrics:
-            node_id = node_data["name"]
-            ec = node_data["typical-power"]
-            ce = node_data["carbon-emissions"]
-            ee = node_data["efficiency"]
-            ure = node_data["renewable-energy-usage"]
-
-            total_power_supply = sum(ps["typical-power"] for ps in node_data["power-supply"])
-            total_power_boards = sum(b["typical-power"] for b in node_data["boards"])
-            total_power_components = sum(c["typical-power"] for c in node_data["components"])
-            total_power_transceivers = sum(t["typical-power"] for t in node_data["transceivers"])
-
-            logging.debug(f"Node {node_id}: EC={ec}, CE={ce}, EE={ee}, URE={ure}")
-            logging.debug(f"Node {node_id}: PS={total_power_supply}, BO={total_power_boards}, CO={total_power_components}, TR={total_power_transceivers}")
-
-            weight = self.__compute_node_weight(ec, ce, ee, ure,
-                                                total_power_supply,
-                                                total_power_boards,
-                                                total_power_components,
-                                                total_power_transceivers)
-            logging.debug(f"Weight for node {node_id}: {weight}")
-            
-            node_data_map[node_id] = {
-                "weight": weight,
-                "ec": ec,
-                "ce": ce,
-                "ee": ee,
-                "ure": ure
-            }
-
-        # Create a graph representation of the topology
-        graph = {}
-        for node in topology["ietf-network:networks"]["network"][0]["node"]:
-            graph[node["node-id"]] = []
-        for link in topology["ietf-network:networks"]["network"][0]["link"]:
-            src = link["source"]["source-node"]
-            dst = link["destination"]["dest-node"]
-            graph[src].append((dst, node_data_map[dst]["weight"]))
-            logging.debug(f"Added link: {src} -> {dst} with weight {node_data_map[dst]['weight']}")
 
-        # Dijkstra's algorithm with restrictions
-        queue = [(0, source, [], 0, 0, 0, 1)]  # (accumulated cost, current node, path, sum_ec, sum_ce, sum_ee, min_ure)
-        visited = set()
+        Args:
+            intent (dict): Network slice intent
+            type (str): Planner type (ENERGY, HRAT, TFS_OPTICAL)
 
-        logging.debug(f"Starting search from {source} to {destination} with restrictions: {dlos}")
-        
-
-        while queue:
-            cost, node, path, sum_ec, sum_ce, sum_ee, min_ure = heapq.heappop(queue)
-            logging.debug(f"Exploring node {node} with cost {cost} and path {path + [node]}")
-            
-            if node in visited:
-                logging.debug(f"Node {node} already visited, skipped.")
-                continue
-            visited.add(node)
-            path = path + [node]
-
-            node_metrics = node_data_map[node]
-            sum_ec += node_metrics["ec"]
-            sum_ce += node_metrics["ce"]
-            sum_ee += node_metrics["ee"]
-            min_ure = min(min_ure, node_metrics["ure"]) if path[:-1] else node_metrics["ure"]
-
-            logging.debug(f"Accumulated -> EC: {sum_ec}, CE: {sum_ce}, EE: {sum_ee}, URE min: {min_ure}")
-
-            if dlos["EC"] is not None and sum_ec > dlos["EC"]:
-                logging.debug(f"Discarded path {path} for exceeding EC ({sum_ec} > {dlos['EC']})")
-                continue
-            if dlos["CE"] is not None and sum_ce > dlos["CE"]:
-                logging.debug(f"Discarded path {path} for exceeding CE ({sum_ce} > {dlos['CE']})")
-                continue
-            if dlos["EE"] is not None and sum_ee > dlos["EE"]:
-                logging.debug(f"Discarded path {path} for exceeding EE ({sum_ee} > {dlos['EE']})")
-                continue
-            if dlos["URE"] is not None and min_ure < dlos["URE"]:
-                logging.debug(f"Discarded path {path} for not reaching minimum URE ({min_ure} < {dlos['URE']})")
-                continue
-
-            if node == destination:
-                logging.debug(f"Destination {destination} reached with a valid path: {path}")
-                return path
-
-            for neighbor, weight in graph.get(node, []):
-                if neighbor not in visited:
-                    logging.debug(f"Qeue -> neighbour: {neighbor}, weight: {weight}")
-                    heapq.heappush(queue, (
-                        cost + weight,
-                        neighbor,
-                        path,
-                        sum_ec,
-                        sum_ce,
-                        sum_ee,
-                        min_ure
-                    ))
-        logging.debug("No valid path found that meets the restrictions.")
-        return []
-
-
-    def __compute_node_weight(self, ec, ce, ee, ure, total_power_supply, total_power_boards, total_power_components, total_power_transceivers, alpha=1, beta=1, gamma=1, delta=1):
-        """
-        Calcula el peso de un nodo con la fórmula:
-        w(v) = α·EC + β·CE + γ/EE + δ·(1 - URE)
+        Returns:
+            dict or None: Planner result or None if type is invalid
         """
-        traffic = 100
-        # Measure one hour of traffic
-        time = 1
-
-        power_idle = ec + total_power_supply + total_power_boards + total_power_components + total_power_transceivers
-        power_traffic = traffic * ee
-
-        power_total = (power_idle + power_traffic)
-
-        green_index = power_total * time / 1000 * (1 - ure) * ce
-
-        return green_index 
-    
-
+        # Log selected planner type
+        logging.info(f"Planner type selected: {type}")
+        # Use energy planner strategy
+        if type   == "ENERGY"     : return energy_planner(intent)
+        # Use HRAT planner with configured IP
+        elif type == "HRAT"       : return hrat_planner(intent, current_app.config["HRAT_IP"])
+        # Use TFS optical planner with configured IP
+        elif type == "TFS_OPTICAL": return tfs_optical_planner(intent, current_app.config["OPTICAL_PLANNER_IP"], action = "create")
+        # Return None if planner type is unsupported
+        else : return None
diff --git a/src/planner/tfs_optical_planner/tfs_optical.py b/src/planner/tfs_optical_planner/tfs_optical.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d5bc79a76f9765ce6f2826f0e6b5d2a0f6a346c
--- /dev/null
+++ b/src/planner/tfs_optical_planner/tfs_optical.py
@@ -0,0 +1,393 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+import requests
+import os
+import uuid
+import json
+from src.config.constants import TEMPLATES_PATH
+from src.utils.safe_get import safe_get
+
+
+def tfs_optical_planner(intent, ip: str, action: str = "create") -> dict:
+    """
+    Plan optical layer configuration for TeraFlow SDN network slices.
+
+    Computes optical paths and generates configuration rules for
+    point-to-multipoint (P2MP) optical connections, including transceiver
+    activation and Layer 3 VPN configuration.
+
+    Args:
+        intent (dict or str): For create action - network slice intent with
+                              service delivery points. For delete action -
+                              slice ID string.
+        ip (str): IP address of the optical path computation service.
+        action (str, optional): Operation to perform - "create" or "delete".
+                                Defaults to "create".
+
+    Returns:
+        list or None: Configuration rules produced by generate_rules(), or
+                      None when the endpoints cannot be resolved, the path
+                      computation service is unreachable, or (on delete) no
+                      stored slice matches the given id.
+
+    Notes:
+        - Supports P2MP (Point-to-Multipoint) connectivity
+        - Computes optical paths using the external TFS optical service
+        - Configures digital subcarrier groups for wavelength division
+    """
+    # Remains None when no rules could be generated in either branch
+    rules = None
+
+    if action == 'delete':
+        logging.debug("DELETE REQUEST RECEIVED: %s", intent)
+        # Load slice database to retrieve the stored intent for deletion
+        with open(os.path.join(TEMPLATES_PATH, "slice.db"), 'r', encoding='utf-8') as file:
+            slices = json.load(file)
+
+        for slice_obj in slices:
+            if slice_obj.get('slice_id') == intent:
+                logging.debug("Slice found: %s", slice_obj['slice_id'])
+                source = None
+                destination = None
+                services = slice_obj['intent']['ietf-network-slice-service:network-slice-services']['slice-service']
+
+                # Extract source and destination from P2MP structure
+                for service in services:
+                    c_groups = service.get("connection-groups", {}).get("connection-group", [])
+                    for cg in c_groups:
+                        constructs = cg.get("connectivity-construct", [])
+                        for construct in constructs:
+                            if "p2mp-sdp" in construct:
+                                source = construct["p2mp-sdp"]["root-sdp-id"]
+                                destination = construct["p2mp-sdp"]["leaf-sdp-id"]
+                                break
+                        if source and destination:
+                            break
+
+                # BUGFIX: 'ip' was previously omitted here, raising TypeError
+                # on every delete request (send_request takes three args).
+                response = send_request(source, destination, ip)
+                summary = {
+                    "source": source,
+                    "destination": destination,
+                    "connectivity-service": response
+                }
+                rules = generate_rules(summary, intent, action)
+                # Slice ids are unique in the database; stop at first match
+                break
+    else:
+        # Extract source and destination from creation intent
+        services = intent["ietf-network-slice-service:network-slice-services"]["slice-service"]
+        source = None
+        destination = None
+
+        for service in services:
+            c_groups = service.get("connection-groups", {}).get("connection-group", [])
+            for cg in c_groups:
+                constructs = cg.get("connectivity-construct", [])
+                for construct in constructs:
+                    source = safe_get(construct, ["p2mp-sdp", "root-sdp-id"])
+                    destination = safe_get(construct, ["p2mp-sdp", "leaf-sdp-id"])
+                    if source and destination:
+                        break
+                if source and destination:
+                    break
+
+        if source and destination:
+            response = send_request(source, destination, ip)
+            if not response:
+                return None
+            summary = {
+                "source": source,
+                "destination": destination,
+                "connectivity-service": response
+            }
+            logging.debug(summary)
+            rules = generate_rules(summary, intent, action)
+        else:
+            # No P2MP endpoints found in the intent; nothing to plan
+            logging.warning("No rules generated. Skipping optical planning.")
+            return None
+
+    return rules
+
+
+def send_request(source, destination, ip):
+    """
+    Send path computation request to the optical TFS service.
+
+    Computes point-to-multipoint optical paths using the TAPI path
+    computation API.
+
+    Args:
+        source (str or list): Root node identifier(s) for P2MP path
+        destination (str or list): Leaf node identifier(s) for P2MP path
+        ip (str): IP address of the TFS optical service
+
+    Returns:
+        dict or None: Path computation response containing connectivity
+                      service with optical connection attributes, or None on
+                      connection failure, HTTP error, or a non-JSON reply.
+
+    Notes:
+        - API endpoint: POST /OpticalTFS/restconf/operations/tapi-path-computation:compute-p2mp
+        - Assumes 100 Gbps bitrate, bidirectional transmission
+        - Band width of 200, with 4 subcarriers per source
+        - 15 second timeout for requests
+    """
+    url = f"http://{ip}:31060/OpticalTFS/restconf/operations/tapi-path-computation:compute-p2mp"
+
+    headers = {
+        "Content-Type": "application/json",
+        "Accept": "*/*"
+    }
+
+    # Normalize source and destination to lists so single endpoints and
+    # multi-endpoint requests share one payload shape
+    sources_list = [source] if isinstance(source, str) else list(source)
+    destinations_list = [destination] if isinstance(destination, str) else list(destination)
+
+    payload = {
+        "sources": sources_list,
+        "destinations": destinations_list,
+        "bitrate": 100,
+        "bidirectional": True,
+        "band": 200,
+        "subcarriers_per_source": [4] * len(sources_list)
+    }
+    logging.debug(f"Payload for path computation: {json.dumps(payload, indent=2)}")
+
+    try:
+        # BUGFIX: timeout was 1s while the documented contract is 15s; path
+        # computation regularly needs more than one second.
+        response = requests.post(url, headers=headers, json=payload, timeout=15)
+        # Fail on HTTP errors instead of trying to JSON-parse an error page
+        response.raise_for_status()
+        return response.json()
+    except (requests.exceptions.RequestException, ValueError):
+        # ValueError covers a 2xx reply whose body is not valid JSON
+        logging.warning("Error connecting to the Optical Planner service. Skipping optical planning.")
+        return None
+
+
+def group_block(group, action, group_id_override=None, node=None):
+    """
+    Build one digital subcarrier group configuration block.
+
+    Digital subcarrier groups describe which optical subcarriers of a
+    transceiver are switched on or off (wavelength division multiplexing).
+
+    Args:
+        group (dict): Subcarrier group data from the path computation response
+        action (str): "create" activates the subcarriers, anything else
+                      (i.e. "delete") deactivates them
+        group_id_override (int, optional): Group ID to use instead of the one
+                                           carried in `group`. Defaults to None
+        node (str, optional): "leaf" selects the fixed four-subcarrier layout.
+                              Defaults to None
+
+    Returns:
+        dict: Mapping with the group id and the per-subcarrier active flags
+    """
+    # The agent expects string flags, not Python booleans
+    active = "true" if action == 'create' else "false"
+
+    if group_id_override is not None:
+        group_id = group_id_override
+    else:
+        group_id = group["digital_sub_carriers_group_id"]
+
+    # Leaf nodes always expose the fixed subcarriers 1-4; every other node
+    # takes the subcarrier ids computed by the path computation service.
+    if node == "leaf":
+        subcarrier_ids = [1, 2, 3, 4]
+    else:
+        subcarrier_ids = group["subcarrier-id"]
+
+    return {
+        "digital_sub_carriers_group_id": group_id,
+        "digital_sub_carrier_id": [
+            {"sub_carrier_id": sid, "active": active}
+            for sid in subcarrier_ids
+        ]
+    }
+
+
+def generate_rules(connectivity_service, intent, action):
+    """
+    Generate provisioning rules for optical and IP layer configuration.
+
+    Transforms path computation results into concrete configuration actions
+    for transceivers and Layer 3 VPN setup.
+
+    Args:
+        connectivity_service (dict): Path computation summary containing:
+            - source: Root node identifier
+            - destination: List of leaf node identifiers
+            - connectivity-service: Optical connection attributes
+        intent (dict): Original network slice intent with IP configuration
+        action (str): "create" or "delete" operation
+
+    Returns:
+        list: Configuration rules with provisioning actions
+
+    Notes:
+        - For create: Generates XR_AGENT_ACTIVATE_TRANSCEIVER and CONFIG_VPNL3 actions
+        - For delete: Generates DEACTIVATE_XR_AGENT_TRANSCEIVER actions
+        - Hub node uses channel-1 at 195000000 MHz
+        - Known leaves (T1.1/T1.2/T1.3) map to channel-1/3/5; unmapped
+          destinations are skipped with a warning
+        - Fixed VLAN ID of 500 for all connections
+        - Tunnel UUID derived deterministically from the endpoint names
+    """
+    src_name = connectivity_service.get("source", "FALTA VALOR")
+    dest_list = connectivity_service.get("destination", ["FALTA VALOR"])
+    dest_str = ",".join(dest_list)
+    config_rules = []
+
+    # Generate deterministic UUID for tunnel based on endpoints
+    network_slice_uuid_str = f"{src_name}_to_{dest_str}"
+    tunnel_uuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, network_slice_uuid_str))
+
+    provisionamiento = {
+        "network-slice-uuid": network_slice_uuid_str,
+        "viability": True,
+        "actions": []
+    }
+
+    # Extract optical connection attributes from path computation
+    attributes = connectivity_service["connectivity-service"]["tapi-connectivity:connectivity-service"]["connection"][0]["optical-connection-attributes"]
+    groups = attributes["subcarrier-attributes"]["digital-subcarrier-group"]
+    operational_mode = attributes["modulation"]["operational-mode"]
+
+    # Build hub (root) configuration with all subcarrier groups
+    hub_groups = [
+        group_block(group, action, group_id_override=index + 1)
+        for index, group in enumerate(groups)
+    ]
+    hub = {
+        "name": "channel-1",
+        "frequency": 195000000,
+        "target_output_power": 0,
+        "operational_mode": operational_mode,
+        "operation": "merge",
+        "digital_sub_carriers_group": hub_groups
+    }
+
+    # Map destinations to specific channels and frequencies.
+    # BUGFIX: the previous unguarded if-chain left name/freq unbound
+    # (NameError) for an unknown first destination, or silently reused the
+    # previous leaf's channel assignment for later unknown destinations.
+    channel_map = {
+        "T1.1": ("channel-1", 195006250),
+        "T1.2": ("channel-3", 195018750),
+        "T1.3": ("channel-5", 195031250),
+    }
+
+    # Build leaf configurations with specific frequencies per destination
+    leaves = []
+    for dest, group in zip(connectivity_service["destination"], groups):
+        if dest not in channel_map:
+            logging.warning("No channel mapping for destination %s; leaf skipped", dest)
+            continue
+        name, freq = channel_map[dest]
+        leaf = {
+            "name": name,
+            "frequency": freq,
+            "target_output_power": group["Tx-power"],
+            "operational_mode": int(group["operational-mode"]),
+            "operation": "merge",
+            "digital_sub_carriers_group": [group_block(group, action, group_id_override=1, node="leaf")]
+        }
+        leaves.append(leaf)
+
+    final_json = {"components": [hub] + leaves}
+
+    if action == 'create':
+        # Add transceiver activation action
+        provisionamiento["actions"].append({
+            "type": "XR_AGENT_ACTIVATE_TRANSCEIVER",
+            "layer": "OPTICAL",
+            "content": final_json,
+            "controller-uuid": "IPoWDM Controller"
+        })
+
+        # Extract IP configuration from intent for L3 VPN setup
+        nodes = {}
+        sdp_list = intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]['sdps']['sdp']
+
+        for sdp in sdp_list:
+            node = sdp['node-id']
+            attachments = sdp['attachment-circuits']['attachment-circuit']
+            for ac in attachments:
+                nodes[node] = {
+                    "ip-address": ac.get('ac-ipv4-address', None),
+                    "ip-mask": ac.get('ac-ipv4-prefix-length', None),
+                    "vlan-id": 500  # Fixed VLAN ID
+                }
+
+        # Build L3 VPN content for the P2MP topology.
+        # BUGFIX: destination entries (dest1-, dest2-, ...) are generated per
+        # leaf instead of hard-coding dest_list[0..2], which raised IndexError
+        # for any topology without exactly three leaves.
+        vpn_content = {
+            "tunnel-uuid": tunnel_uuid,
+            "src-node-uuid": src_name,
+            "src-ip-address": nodes[src_name]["ip-address"],
+            "src-ip-mask": str(nodes[src_name]["ip-mask"]),
+            "src-vlan-id": nodes[src_name]["vlan-id"]
+        }
+        for idx, dest in enumerate(dest_list, start=1):
+            vpn_content[f"dest{idx}-node-uuid"] = dest
+            vpn_content[f"dest{idx}-ip-address"] = nodes[dest]["ip-address"]
+            vpn_content[f"dest{idx}-ip-mask"] = str(nodes[dest]["ip-mask"])
+            vpn_content[f"dest{idx}-vlan-id"] = nodes[dest]["vlan-id"]
+
+        # Add L3 VPN configuration action for P2MP topology
+        provisionamiento["actions"].append({
+            "type": "CONFIG_VPNL3",
+            "layer": "IP",
+            "content": vpn_content,
+            "controller-uuid": "IP Controller"
+        })
+
+        config_rules.append(provisionamiento)
+    else:
+        # For deletion, list every node touched by the tunnel (root + leaves)
+        nodes = [src_name] + list(dest_list)
+        aux = tunnel_uuid + '-' + src_name + '-' + '-'.join(dest_list)
+
+        provisionamiento["actions"].append({
+            "type": "DEACTIVATE_XR_AGENT_TRANSCEIVER",
+            "layer": "OPTICAL",
+            "content": final_json,
+            "controller-uuid": "IPoWDM Controller",
+            "uuid": aux,
+            "nodes": nodes
+        })
+        config_rules.append(provisionamiento)
+
+    return config_rules
\ No newline at end of file
diff --git a/src/realizer/e2e/e2e_connect.py b/src/realizer/e2e/e2e_connect.py
new file mode 100644
index 0000000000000000000000000000000000000000..368ac3df1dc5c9809500f601f369705aefd828a4
--- /dev/null
+++ b/src/realizer/e2e/e2e_connect.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from ..tfs.helpers.tfs_connector import tfs_connector
+
+def e2e_connect(requests, controller_ip):
+    """ 
+    Function to connect end-to-end services in TeraFlowSDN (TFS) controller.
+    
+    Args:
+        requests (list): List of requests to be sent to the TFS e2e controller.
+        controller_ip (str): IP address of the TFS e2e controller.
+
+    Returns:
+        Whatever tfs_connector().webui_post returns — presumably an HTTP
+        response object; confirm against the tfs_connector implementation.
+    """
+    # Forward the request batch to the controller's WebUI POST endpoint
+    response = tfs_connector().webui_post(controller_ip, requests)
+    return response
\ No newline at end of file
diff --git a/src/realizer/e2e/main.py b/src/realizer/e2e/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eed88389d428755c5b124a59383827de53d9259
--- /dev/null
+++ b/src/realizer/e2e/main.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .service_types.del_l3ipowdm_slice import del_l3ipowdm_slice
+from .service_types.l3ipowdm_slice import l3ipowdm_slice
+
+def e2e(ietf_intent, way=None, response=None, rules = None):
+    """
+    Dispatch an E2E realization request to the matching service-type builder.
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent (not used by
+            the current handlers; kept for interface symmetry).
+        way (str, optional): Realization way; "L3oWDM" builds the slice,
+            "DEL_L3oWDM" builds the deletion-side request.
+        response (dict, optional): Previously built response, forwarded to
+            the deletion handler only.
+        rules (dict, optional): Realization rules forwarded to the handlers.
+
+    Returns:
+        The request built by the selected handler, or None when `way` is
+        unsupported (a warning is logged).
+    """
+    logging.debug(f"E2E Realizer selected: {way}")
+    if   way == "L3oWDM":     realizing_request = l3ipowdm_slice(rules)
+    elif way == "DEL_L3oWDM": realizing_request = del_l3ipowdm_slice(rules, response)
+    else:
+        logging.warning(f"Unsupported way: {way}.")
+        realizing_request = None
+    return realizing_request
\ No newline at end of file
diff --git a/src/realizer/e2e/service_types/del_l3ipowdm_slice.py b/src/realizer/e2e/service_types/del_l3ipowdm_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bbeb13c4fe8b58f2e3afb4ed5a22bfb02cb286c
--- /dev/null
+++ b/src/realizer/e2e/service_types/del_l3ipowdm_slice.py
@@ -0,0 +1,177 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH, NBI_L2_PATH
+from src.utils.load_template import load_template
+from flask import current_app
+
+def del_l3ipowdm_slice(ietf_intent, response):
+    """
+    Translate a slice intent into a TeraFlow L2VPN service request.
+
+    This method prepares an L2VPN service request by:
+    1. Defining endpoint routers
+    2. Loading a service template (WEBUI or IETF NBI flavor)
+    3. Configuring the service UUID
+    4. Configuring service endpoints
+    5. Adding QoS constraints (WEBUI) / site data (NBI)
+
+    NOTE(review): despite the "del_" prefix, this function builds a
+    creation-style service request rather than a deletion payload --
+    confirm intended semantics against the "DEL_L3oWDM" dispatch in
+    e2e/main.py.
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        response (list): Previously built slice descriptors; the entry whose
+            "id" matches this intent supplies the "requirements" used as
+            service constraints (WEBUI branch only).
+
+    Returns:
+        dict: A TeraFlow service request for L2VPN configuration.
+            NOTE(review): if UPLOAD_TYPE is neither "WEBUI" nor "NBI",
+            tfs_request is never bound and the final return raises
+            UnboundLocalError.
+    """
+    # Hardcoded router endpoints
+    # TODO (should be dynamically determined)
+    origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
+    origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
+    destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    # Locate the response entry matching this slice id (None when absent).
+    # NOTE(review): "id" and "slice" shadow Python builtins.
+    id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+    slice = next((d for d in response if d.get("id") == id), None)
+
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        # Load L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "L2-VPN_template_empty.json"))["services"][0]
+
+        # Configure service UUID
+        tfs_request["service_id"]["service_uuid"]["uuid"] = ietf_intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]["id"]
+
+        # Configure service endpoints
+        # The first endpoint entry gets the origin router; all others the destination.
+        for endpoint in tfs_request["service_endpoint_ids"]:
+            endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
+            endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
+
+        # Add service constraints
+        # NOTE(review): raises AttributeError if no response entry matched (slice is None).
+        for constraint in slice.get("requirements", []):
+            tfs_request["service_constraints"].append({"custom": constraint})
+
+        # Add configuration rules
+        # Rule index 1 is filled from SDP 0 (origin), index 2 from SDP 1 (destination).
+        for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
+            router_id = origin_router_id if i == 1 else destination_router_id
+            router_if = origin_router_if if i == 1 else destination_router_if
+            resource_value = config_rule["custom"]["resource_value"]
+
+            sdp_index = i - 1
+            vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
+            # An empty/falsy match value leaves the template's vlan_id untouched.
+            if vlan_value:
+                resource_value["vlan_id"] = int(vlan_value)
+            resource_value["circuit_id"] = vlan_value
+            resource_value["remote_router"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
+            config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
+
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        #self.path = NBI_L2_PATH
+        # Load IETF L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "ietfL2VPN_template_empty.json"))
+
+        # Add path to the request
+        tfs_request["path"] = NBI_L2_PATH
+
+        # Generate service UUID
+        # Strip the "slice-service-" prefix to recover the bare UUID.
+        full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        uuid_only = full_id.split("slice-service-")[-1]
+        tfs_request["ietf-l2vpn-svc:vpn-service"][0]["vpn-id"] = uuid_only
+
+        # Configure service endpoints
+        # NOTE(review): assumes the template holds at most two sites and the
+        # intent carries at least two SDPs -- confirm both shapes.
+        sites = tfs_request["ietf-l2vpn-svc:vpn-service"][0]["site"]
+        sdps = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"]
+
+        for i, site in enumerate(sites):
+            is_origin = (i == 0)
+            router_id = origin_router_id if is_origin else destination_router_id
+            sdp = sdps[0] if is_origin else sdps[1]
+            site["site-id"] = router_id
+            site["site-location"] = sdp["node-id"]
+            site["site-network-access"]["interface"]["ip-address"] = sdp["sdp-ip-address"]
+
+    logging.info(f"L2VPN Intent realized\n")
+    return tfs_request
+
+def tfs_l2vpn_support(requests):
+    """
+    Configuration support for L2VPN with path selection based on MPLS traffic-engineering tunnels.
+
+    Extracts per-request source/destination settings from the service
+    config rules and pushes the resulting command templates to two
+    hardcoded Cisco routers.
+
+    NOTE(review): cisco_connector is not imported in this module -- calling
+    this function as written raises NameError. Confirm the intended import.
+
+    Args:
+        requests (list): TeraFlow service requests whose config_rules[1]
+            (source side) and config_rules[2] (destination side) carry the
+            resource_value dicts (ni_name, remote_router, vlan_id), and
+            whose service_endpoint_ids provide the interface names.
+    """
+    # TODO(review): hardcoded management addresses -- should be configurable.
+    sources={
+        "source": "10.60.125.44",
+        "config":[]
+    }
+    destinations={
+        "destination": "10.60.125.45",
+        "config":[]
+    }
+    for request in requests:
+        # Configure Source Endpoint
+        temp_source = request["service_config"]["config_rules"][1]["custom"]["resource_value"]
+        endpoints = request["service_endpoint_ids"]
+        config = {
+            "ni_name": temp_source["ni_name"],
+            "remote_router": temp_source["remote_router"],
+            "interface": endpoints[0]["endpoint_uuid"]["uuid"].replace("0/0/0-", ""),
+            "vlan" : temp_source["vlan_id"],
+            "number" : temp_source["vlan_id"] % 10 + 1
+        }
+        sources["config"].append(config)
+
+        # Configure Destination Endpoint
+        temp_destiny = request["service_config"]["config_rules"][2]["custom"]["resource_value"]
+        config = {
+            "ni_name": temp_destiny["ni_name"],
+            "remote_router": temp_destiny["remote_router"],
+            "interface": endpoints[1]["endpoint_uuid"]["uuid"].replace("0/0/3-", ""),
+            "vlan" : temp_destiny["vlan_id"],
+            "number" : temp_destiny["vlan_id"] % 10 + 1
+        }
+        destinations["config"].append(config)
+
+    # Push the generated command templates to both routers.
+    #cisco_source = cisco_connector(source_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_source = cisco_connector(sources["source"], sources["config"])
+    commands = cisco_source.full_create_command_template()
+    cisco_source.execute_commands(commands)
+
+    #cisco_destiny = cisco_connector(destination_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_destiny = cisco_connector(destinations["destination"], destinations["config"])
+    commands = cisco_destiny.full_create_command_template()
+    cisco_destiny.execute_commands(commands)
+
+def tfs_l2vpn_delete():
+    """
+    Delete L2VPN configurations from Cisco devices.
+
+    This method removes L2VPN configurations from Cisco routers.
+
+    NOTE(review): cisco_connector is not imported in this module -- calling
+    this function as written raises NameError. Confirm the intended import.
+
+    Notes:
+        - Uses cisco_connector to generate and execute deletion commands
+        - Clears Network Interface (NI) settings
+        - Router addresses are hardcoded (same pair as tfs_l2vpn_support)
+    """
+    # Delete Source Endpoint Configuration
+    source_address = "10.60.125.44"
+    cisco_source = cisco_connector(source_address)
+    cisco_source.execute_commands(cisco_source.create_command_template_delete())
+
+    # Delete Destination Endpoint Configuration
+    destination_address = "10.60.125.45"
+    cisco_destiny = cisco_connector(destination_address)
+    cisco_destiny.execute_commands(cisco_destiny.create_command_template_delete())
\ No newline at end of file
diff --git a/src/realizer/e2e/service_types/l3ipowdm_slice.py b/src/realizer/e2e/service_types/l3ipowdm_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fadf4487056d20f65941c0e0e45caf69b249a82
--- /dev/null
+++ b/src/realizer/e2e/service_types/l3ipowdm_slice.py
@@ -0,0 +1,192 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH
+from src.utils.load_template import load_template
+
+def l3ipowdm_slice(rules):
+    """
+    Build the list of TeraFlow requests realizing an L3-over-WDM slice.
+
+    Iterates over rules["actions"] and, per action type:
+    - CREATE_OPTICAL_SLICE: fills the Optical_slice.json template.
+    - PROVISION_MEDIA_CHANNEL_OLS_PATH: fills the TAPI_service.json template.
+    - ACTIVATE_TRANSCEIVER: buffers transceiver power/frequency parameters.
+    - CONFIG_VPNL3: fills the IPoWDM_orchestrator.json template, consuming
+      the buffered transceiver parameters and the last-seen bandwidth.
+
+    Args:
+        rules (dict): Realization rules; must contain an "actions" list.
+
+    Returns:
+        list: TeraFlow service requests, in action order.
+    """
+    # Buffered ACTIVATE_TRANSCEIVER parameters, consumed by CONFIG_VPNL3.
+    transceiver_params = []
+    # Last bandwidth seen on a media-channel action; reused by CONFIG_VPNL3,
+    # so action ordering matters (stays 0 if no media-channel action precedes).
+    bandwidth = 0
+
+    logging.debug(f"Preparing L3oWDM slice with rules: {rules}")
+    tfs_requests = []
+    for rule in rules["actions"]:
+        logging.debug(f"Processing rule: {rule['type']}")
+        if rule["type"] == "CREATE_OPTICAL_SLICE":
+            tfs_request = load_template(os.path.join(TEMPLATES_PATH, "Optical_slice.json"))
+            request = optical_slice_template(tfs_request, rules)
+            logging.debug(f"Sending Optical Slice to Optical Controller {request}")
+            tfs_requests.append(request)
+
+        elif rule["type"] == "PROVISION_MEDIA_CHANNEL_OLS_PATH":
+
+            origin_router_id         = rule["content"]["src-sip-uuid"]
+            destination_router_id    = rule["content"]["dest-sip-uuid"]
+            direction                = rule["content"]["direction"]
+            bandwidth                = rule["content"]["bandwidth-ghz"]
+            service_uuid             = rule["content"]["ols-path-uuid"]
+            tenant_uuid              = rule["tenant-uuid"]
+            layer_protocol_name      = rule["content"]["layer-protocol-name"]
+            layer_protocol_qualifier = rule["content"]["layer-protocol-qualifier"]
+            lower_frequency_mhz      = rule["content"]["lower-frequency-mhz"]
+            upper_frequency_mhz      = rule["content"]["upper-frequency-mhz"]
+            link_uuid_path           = rule["content"]["link-uuid-path"]
+            granularity              = rule["content"]["adjustment-granularity"]
+            grid                     = rule["content"]["grid-type"]
+
+            tfs_request = load_template(os.path.join(TEMPLATES_PATH, "TAPI_service.json"))
+
+            tfs_request["services"][0]["service_id"]["service_uuid"]["uuid"] = service_uuid
+            config_rules = tfs_request["services"][0]["service_config"]["config_rules"][0]
+
+            # Numeric fields are serialized as strings here (bw, frequencies);
+            # NOTE(review): the CONFIG_VPNL3 branch below stores bw raw -- confirm
+            # which form each template consumer expects.
+            config_rules["tapi_lsp"]["rule_set"]["src"]  = origin_router_id
+            config_rules["tapi_lsp"]["rule_set"]["dst"]  = destination_router_id
+            config_rules["tapi_lsp"]["rule_set"]["uuid"] = service_uuid
+            config_rules["tapi_lsp"]["rule_set"]["bw"]   = str(bandwidth)
+            config_rules["tapi_lsp"]["rule_set"]["tenant_uuid"] = tenant_uuid
+            config_rules["tapi_lsp"]["rule_set"]["direction"]   = direction
+            config_rules["tapi_lsp"]["rule_set"]["layer_protocol_name"] = layer_protocol_name
+            config_rules["tapi_lsp"]["rule_set"]["layer_protocol_qualifier"] = layer_protocol_qualifier
+            config_rules["tapi_lsp"]["rule_set"]["lower_frequency_mhz"] = str(lower_frequency_mhz)
+            config_rules["tapi_lsp"]["rule_set"]["upper_frequency_mhz"] = str(upper_frequency_mhz)
+            config_rules["tapi_lsp"]["rule_set"]["link_uuid_path"] = link_uuid_path
+            config_rules["tapi_lsp"]["rule_set"]["granularity"]    = granularity
+            config_rules["tapi_lsp"]["rule_set"]["grid_type"]      = grid
+
+            logging.debug(f"Sending Media Channel Service to Orchestrator: {tfs_request}")
+            tfs_requests.append(tfs_request)
+
+        elif rule["type"] == "ACTIVATE_TRANSCEIVER":
+            # Buffer only; the request is emitted by the CONFIG_VPNL3 action.
+            params = {
+                "router_id": rule["content"]["node-uuid"],
+                "router_tp": rule["content"]["termination-point-uuid"],
+                "frequency": rule["content"]["frequency-ghz"],
+                "power":     rule["content"]["tx-power-dbm"]
+            }
+            transceiver_params.append(params)
+        elif rule["type"] == "CONFIG_VPNL3":
+            src_router_id  = rule["content"]["src-node-uuid"]
+
+            # Match buffered transceiver params to src/dst by router id.
+            # NOTE(review): assumes exactly two ACTIVATE_TRANSCEIVER actions were
+            # processed before this one -- IndexError otherwise.
+            if src_router_id == transceiver_params[0]["router_id"]:
+                src_power = transceiver_params[0]["power"]
+                src_frequency = transceiver_params[0]["frequency"]
+                dst_power = transceiver_params[1]["power"]
+                dst_frequency = transceiver_params[1]["frequency"]
+            else:
+                src_power = transceiver_params[1]["power"]
+                src_frequency = transceiver_params[1]["frequency"]
+                dst_power = transceiver_params[0]["power"]
+                dst_frequency = transceiver_params[0]["frequency"]
+
+            # NOTE(review): duplicate of the src_router_id assignment above.
+            src_router_id  = rule["content"]["src-node-uuid"]
+            src_ip_address = rule["content"]["src-ip-address"]
+            src_ip_mask    = rule["content"]["src-ip-mask"]
+            src_vlan_id    = rule["content"]["src-vlan-id"]
+
+            dst_router_id  = rule["content"]["dest-node-uuid"]
+            dst_ip_address = rule["content"]["dest-ip-address"]
+            dst_ip_mask    = rule["content"]["dest-ip-mask"]
+            dst_vlan_id    = rule["content"]["dest-vlan-id"]
+
+            service_uuid = rule["content"]["tunnel-uuid"]
+
+            tfs_request = load_template(os.path.join(TEMPLATES_PATH, "IPoWDM_orchestrator.json"))
+            tfs_request["services"][0]["service_id"]["service_uuid"]["uuid"] = service_uuid
+            config_rules = tfs_request["services"][0]["service_config"]["config_rules"][0]
+            src = config_rules["ipowdm"]["rule_set"]["src"]
+            src.append({
+                'uuid': src_router_id,
+                'ip_address': src_ip_address,
+                'ip_mask': src_ip_mask,
+                'vlan_id': src_vlan_id,
+                'power': src_power,
+                'frequency': src_frequency
+            })
+
+            dst = config_rules["ipowdm"]["rule_set"]["dst"]
+            dst.append({
+                'uuid': dst_router_id,
+                'ip_address': dst_ip_address,
+                'ip_mask': dst_ip_mask,
+                'vlan_id': dst_vlan_id,
+                'power': dst_power,
+                'frequency': dst_frequency
+            })
+
+            config_rules["ipowdm"]["rule_set"]["bw"]        = bandwidth
+            config_rules["ipowdm"]["rule_set"]["uuid"]      = service_uuid
+
+            logging.debug(f"Sending IPoWDM Service to Orchestrator: {tfs_request}")
+            tfs_requests.append(tfs_request)
+
+        else:
+            logging.debug("Unsupported rule type for optical slice: %s", rule["type"])
+    return tfs_requests
+
+def optical_slice_template(template, rule):
+    """
+    Complete the optical slice template with the data provided.
+
+    Mutates `template` in place: retags media-channel node-edge-point specs
+    across ALL actions, then copies SIP uuids, nodes and links from the
+    FIRST action's content only, and sets the context uuid/name from the
+    tenant and network-slice uuids.
+
+    Args:
+        template (dict): Parsed optical slice template (modified in place).
+        rule (dict): Rule set containing "actions" and "network-slice-uuid".
+
+    Returns:
+        dict: The completed (mutated) template.
+    """
+
+    # Re-key to the namespaced TAPI photonic-media name expected downstream.
+    for action in rule.get('actions', []):
+        content = action.get('content', {})
+        nodes = content.get('node', [])
+        for node in nodes:
+            for onp in node.get('owned-node-edge-point', []):
+                if 'media-channel-node-edge-point-spec' in onp:
+                    onp['tapi-photonic-media:media-channel-node-edge-point-spec'] = onp.pop('media-channel-node-edge-point-spec')
+
+    # Copy SIP uuids positionally, bounded by the rule's SIP count.
+    for i, sip in enumerate(template['tapi-common:context']['service-interface-point']):
+        if i < len(rule['actions'][0]['content']['service-interface-point']):
+            sip['uuid'] = rule['actions'][0]['content']['service-interface-point'][i]['uuid']
+
+    # Append the rule's nodes and links to the template topology.
+    nodes_template = template['tapi-common:context']['tapi-topology:topology-context']['topology'][0]['node']
+    nodes_data = rule['actions'][0]['content']['node']
+    for new_node in nodes_data:
+        nodes_template.append(new_node)
+
+    links_template = template['tapi-common:context']['tapi-topology:topology-context']['topology'][0]['link']
+    links_rule = rule['actions'][0]['content']['link']
+    for link_t in links_rule:
+        links_template.append(link_t)
+
+    template['tapi-common:context']['uuid'] = rule['actions'][0]['content']['tenant-uuid']
+    template['tapi-common:context']['name'][0]['value'] = rule['network-slice-uuid']
+
+    return template
diff --git a/src/realizers/ixia/NEII_V4.py b/src/realizer/ixia/helpers/NEII_V4.py
similarity index 99%
rename from src/realizers/ixia/NEII_V4.py
rename to src/realizer/ixia/helpers/NEII_V4.py
index f9379d2cc0ddb0aceecb38ad918e0a995b0cebfe..e9bf61a24d0a6b42f6d0179a4d9a92640ab679ec 100644
--- a/src/realizers/ixia/NEII_V4.py
+++ b/src/realizer/ixia/helpers/NEII_V4.py
@@ -16,13 +16,12 @@
 
 from .automatizacion_ne2v4 import automatizacion
 import ipaddress, logging
-from src.Constants import IXIA_IP
 
 class NEII_controller:
-    def __init__(self, ixia_ip=IXIA_IP):
+    def __init__(self, ixia_ip):
         self.ixia_ip = ixia_ip
 
-    def menu_principal(self, ip=IXIA_IP):
+    def menu_principal(self, ip):
         '''
         Inputs:
         Outputs:
diff --git a/src/realizers/ixia/automatizacion_ne2v4.py b/src/realizer/ixia/helpers/automatizacion_ne2v4.py
similarity index 100%
rename from src/realizers/ixia/automatizacion_ne2v4.py
rename to src/realizer/ixia/helpers/automatizacion_ne2v4.py
diff --git a/src/realizer/ixia/ixia_connect.py b/src/realizer/ixia/ixia_connect.py
new file mode 100644
index 0000000000000000000000000000000000000000..456d23a775cfda023404748623acd7f7b9a96e44
--- /dev/null
+++ b/src/realizer/ixia/ixia_connect.py
@@ -0,0 +1,35 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from .helpers.NEII_V4 import NEII_controller
+
+def ixia_connect(requests, ixia_ip):
+    """
+    Connect to the IXIA NEII controller and send the requests.
+    
+    Args:
+        requests (dict): IXIA NEII requests
+        ixia_ip (str): IXIA NEII controller IP address
+    
+    Returns:
+        response (requests.Response): Response from the IXIA NEII controller
+    """
+    response = None
+    neii_controller = NEII_controller(ixia_ip)
+    for intent in requests["services"]:
+        # Send each separate IXIA request
+        response = neii_controller.nscNEII(intent)
+    return response
\ No newline at end of file
diff --git a/src/realizer/ixia/main.py b/src/realizer/ixia/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b8d3d7c4bba812cf9439b0c0d88f6e571558efb
--- /dev/null
+++ b/src/realizer/ixia/main.py
@@ -0,0 +1,93 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+
+def ixia(ietf_intent):
+    """
+    Prepare an Ixia service request based on the IETF intent.
+
+    Extracts SLO metric bounds (bandwidth, latency, delay variation) and
+    endpoint/VLAN data from the intent and flattens them into the dict
+    consumed by the IXIA NEII controller.
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+
+    Returns:
+        dict: An Ixia service request for configuration. Missing fields in
+            the intent yield None values (all lookups use .get defaults).
+    """
+    metric_bounds = ietf_intent.get("ietf-network-slice-service:network-slice-services", {}) \
+        .get("slo-sle-templates", {}) \
+        .get("slo-sle-template", [{}])[0] \
+        .get("slo-policy", {}) \
+        .get("metric-bound", [])
+
+    # Initialize SLO values
+    bandwidth = None
+    latency = None
+    tolerance = None
+
+    # Assign values according to the metric type
+    for metric in metric_bounds:
+        metric_type = metric.get("metric-type")
+        bound = metric.get("bound")
+
+        if metric_type == "one-way-bandwidth":
+            bandwidth = bound
+        elif metric_type == "one-way-delay-maximum":
+            latency = bound
+        elif metric_type == "one-way-delay-variation-maximum": 
+            tolerance = bound
+
+    # Build the intent dictionary for the IXIA controller
+    intent = {
+        "src_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slice-service", [{}])[0]
+            .get("sdps", {}).get("sdp", [{}])[0]
+            .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
+            .get("sdp-peering", {}).get("peer-sap-id"),
+
+        "dst_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slice-service", [{}])[0]
+            .get("sdps", {}).get("sdp", [{}, {}])[1]
+            .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
+            .get("sdp-peering", {}).get("peer-sap-id"),
+
+        "vlan_id": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slice-service", [{}])[0]
+            .get("sdps", {}).get("sdp", [{}])[0]
+            .get("service-match-criteria", {}).get("match-criterion", [{}])[0]
+            .get("value"),
+
+        "bandwidth": bandwidth,
+        "latency": latency,
+        "tolerance": tolerance,
+
+        "latency_version": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
+            .get("description"),
+
+        "reliability": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+            .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
+            .get("sle-policy", {}).get("reliability"),
+    }
+
+    logging.info(f"IXIA Intent realized\n")
+    return intent
\ No newline at end of file
diff --git a/src/realizer/main.py b/src/realizer/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb6908af7e82484e09c518b837d53dba11847244
--- /dev/null
+++ b/src/realizer/main.py
@@ -0,0 +1,83 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging
+from .select_way import select_way
+from .nrp_handler import nrp_handler
+from src.utils.safe_get import safe_get
+
+def realizer(ietf_intent, need_nrp=False, order=None, nrp=None, controller_type=None, response=None, rules = None):
+    """
+    Manage the slice creation workflow.
+
+    This method handles two primary scenarios:
+    1. Interact with network controllers for NRP (Network Resource Partition) operations when need_nrp is True
+    2. Slice service selection when need_nrp is False
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        need_nrp (bool, optional): Flag to indicate if NRP operations are needed. Defaults to False.
+        order (str, optional): Type of NRP operation (READ, UPDATE, CREATE). Defaults to None.
+        nrp (dict, optional): Specific Network Resource Partition to operate on. Defaults to None.
+        controller_type (str, optional): Type of controller (TFS, IXIA, E2E). Defaults to None.
+        response (dict, optional): Response built for user feedback. Defaults to None.
+        rules (dict, optional): Specific rules for slice realization. Defaults to None.
+
+    Returns:
+        dict: A realization request for the specified network slice type,
+            or None when the E2E way cannot be determined from the rules.
+    """
+    if need_nrp:
+        # Perform NRP-related operations
+        nrp_view = nrp_handler(order, nrp)
+        return nrp_view
+    else:
+        # Select slice service method
+        if controller_type == "E2E":
+            # Derive the realization way from the action types in the rules.
+            # A list of rule sets collapses to its first entry; a plain string
+            # (or empty rules) yields no actions.
+            if isinstance(rules, list) and len(rules) > 0: rules = rules[0]
+            actions = rules.get("actions", []) if (rules and not type(rules)== str) else []
+
+            # NOTE(review): "XR_AGENT_ACTIVATE_TRANSCEIVER" here vs the
+            # "ACTIVATE_TRANSCEIVER" type handled in e2e/service_types --
+            # confirm the prefixes match what the rule producer emits.
+            has_transceiver  = any(a.get("type", "").startswith("XR_AGENT_ACTIVATE_TRANSCEIVER") for a in actions)
+            has_optical      = any(a.get("type", "").startswith("PROVISION_MEDIA_CHANNEL") for a in actions)
+            has_l3           = any(a.get("type", "").startswith("CONFIG_VPNL3") for a in actions)
+            has_l2           = any(a.get("type", "").startswith("CONFIG_VPNL2") for a in actions)
+
+            del_transceiver  = any(a.get("type", "").startswith("DEACTIVATE_XR_AGENT_TRANSCEIVER") for a in actions)
+            del_optical      = any(a.get("type", "").startswith("DEPROVISION_OPTICAL_RESOURCE") for a in actions)
+            del_l3           = any(a.get("type", "").startswith("REMOVE_VPNL3") for a in actions)
+            del_l2           = any(a.get("type", "").startswith("REMOVE_VPNL2") for a in actions)
+
+            # Creation ways take precedence over deletion ways; within each
+            # group, combined optical+VPN beats single-layer matches.
+            if   has_transceiver:         selected_way = "L3oWDM"
+            elif has_optical and has_l3:  selected_way = "L3oWDM"
+            elif has_optical and has_l2:  selected_way = "L2oWDM"
+            elif has_optical:             selected_way = "OPTIC"
+            elif has_l3:                  selected_way = "L3VPN"
+            elif has_l2:                  selected_way = "L2VPN"
+
+            elif del_transceiver:         selected_way = "DEL_L3oWDM"
+            elif del_optical and del_l3:  selected_way = "DEL_L3oWDM"
+            elif del_optical and del_l2:  selected_way = "DEL_L2oWDM"
+            elif del_optical:             selected_way = "DEL_OPTIC"
+            elif del_l3:                  selected_way = "DEL_L3VPN"
+            elif del_l2:                  selected_way = "DEL_L2VPN"
+            else:
+                logging.warning("Cannot determine the realization way from rules. Skipping request.")
+                return None
+            way = selected_way
+        else:
+            # Non-E2E controllers: the way comes straight from the intent's service tags.
+            way = safe_get(ietf_intent, ['ietf-network-slice-service:network-slice-services', 'slice-service', 0, 'service-tags', 'tag-type', 0, 'tag-type-value', 0])
+        logging.info(f"Selected way: {way}")
+        request = select_way(controller=controller_type, way=way, ietf_intent=ietf_intent, response=response, rules = rules)
+        return request
diff --git a/src/realizer/nrp_handler.py b/src/realizer/nrp_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa08d8deddddb33d8fee93042e1539450c84ce48
--- /dev/null
+++ b/src/realizer/nrp_handler.py
@@ -0,0 +1,72 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, os, json
+from src.config.constants import DATABASE_PATH
+
+def nrp_handler(request, nrp):
+    """
+    Manage Network Resource Partition (NRP) operations.
+
+    This method handles CRUD operations for Network Resource Partitions,
+    interacting with Network Controllers (currently done statically via a JSON-based database file).
+
+    Args:
+        request (str): The type of operation to perform. 
+            Supported values:
+            - "CREATE": Add a new NRP to the database
+            - "READ": Retrieve the current NRP view
+            - "UPDATE": Update an existing NRP (currently a placeholder)
+
+        nrp (dict): The Network Resource Partition details to create or update.
+
+    Returns:
+        None or answer: 
+        - For "CREATE": Returns the response from the controller (currently using a static JSON)
+        - For "READ": Gets the NRP view from the controller (currently using a static JSON)
+        - For "UPDATE": Placeholder for update functionality
+
+    Notes:
+        - Uses a local JSON file "nrp_ddbb.json" to store NRP information as controller operation is not yet defined
+    """
+    if request == "CREATE":
+        # TODO: Implement actual request to Controller to create an NRP
+        logging.debug("Creating NRP")
+
+        # Load existing NRP database
+        with open(os.path.join(DATABASE_PATH, "nrp_ddbb.json"), "r") as archivo:
+            nrp_view = json.load(archivo)
+
+        # Append new NRP to the view
+        nrp_view.append(nrp)
+
+        # Placeholder for controller POST request
+        answer = None
+        return answer
+    elif request == "READ":
+        # TODO: Request to Controller to get topology and current NRP view
+        logging.debug("Reading Topology")
+
+        # Load NRP database
+        with open(os.path.join(DATABASE_PATH, "nrp_ddbb.json"), "r") as archivo:
+            # self.__nrp_view = json.load(archivo)
+            nrp_view = json.load(archivo)
+            return nrp_view
+        
+    elif request == "UPDATE":
+        # TODO: Implement request to Controller to update NRP
+        logging.debug("Updating NRP")
+        answer = ""
\ No newline at end of file
diff --git a/src/realizer/select_way.py b/src/realizer/select_way.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d3cc531b03ef75def47eaadd864ef51c503d900
--- /dev/null
+++ b/src/realizer/select_way.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .ixia.main import ixia
+from .tfs.main  import tfs
+from .e2e.main  import e2e
+
+def select_way(controller=None, way=None, ietf_intent=None, response=None, rules = None):
+    """
+    Determine the method of slice realization.
+
+    Args:
+        controller (str): The controller to use for slice realization. Defaults to None.
+            Supported values:
+            - "IXIA": IXIA NEII for network testing
+            - "TFS": TeraFlow Service for network slice management
+            - "E2E": End-to-End controller for e2e slice management
+        way (str): The type of technology to use. Defaults to None.
+        ietf_intent (dict): IETF-formatted network slice intent. Defaults to None.
+        response (dict): Response built for user feedback. Defaults to None.
+        rules (list, optional): Specific rules for slice realization. Defaults to None.
+
+    Returns:
+        dict: A realization request for the specified network slice type.
+
+    """
+    realizing_request = None
+    if controller == "TFS":
+        realizing_request = tfs(ietf_intent, way, response)
+    elif controller == "IXIA":
+        realizing_request = ixia(ietf_intent)
+    elif controller == "E2E":
+        realizing_request = e2e(ietf_intent, way, response, rules)
+    else:
+        logging.warning(f"Unsupported controller: {controller}. Defaulting to TFS realization.")
+        realizing_request = tfs(ietf_intent, way, response)
+    return realizing_request
\ No newline at end of file
diff --git a/src/realizer/send_controller.py b/src/realizer/send_controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..334de49bdb472d898c968aa0125190a396cf733d
--- /dev/null
+++ b/src/realizer/send_controller.py
@@ -0,0 +1,65 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from flask import current_app
+from .tfs.tfs_connect import tfs_connect
+from .ixia.ixia_connect import ixia_connect
+from .e2e.e2e_connect import e2e_connect
+
+def send_controller(controller_type, requests):
+    """
+    Route provisioning requests to the appropriate network controller.
+    
+    This function acts as a dispatcher that sends configuration requests to
+    different SDN controller types based on the specified controller type.
+    
+    Args:
+        controller_type (str): Type of controller to send requests to:
+            - "TFS": TeraFlow SDN controller
+            - "IXIA": Ixia network emulation controller
+            - "E2E": TeraFlow End-to-End controller
+        requests (dict or list): Configuration requests to be sent to the controller
+    
+    Returns:
+        bool or dict: Response from the controller indicating success/failure
+                     of the provisioning operation. Returns True in DUMMY_MODE.
+                     
+    Notes:
+        - If DUMMY_MODE is enabled in config, returns True without sending requests
+        - Uses IP addresses from Flask application configuration:
+          * TFS_IP for TeraFlow
+          * IXIA_IP for Ixia
+          * TFS_E2E for End-to-End
+        - Logs the controller type that received the request
+        
+    Raises:
+        Exception: May be raised by individual connect functions on communication errors
+    """
+    if current_app.config["DUMMY_MODE"]:
+        return True
+        
+    if controller_type == "TFS":
+        response = tfs_connect(requests, current_app.config["TFS_IP"])
+        logging.info("Request sent to Teraflow")
+    elif controller_type == "IXIA":
+        response = ixia_connect(requests, current_app.config["IXIA_IP"])
+        logging.info("Requests sent to Ixia")
+    elif controller_type == "E2E":
+        response = e2e_connect(requests, current_app.config["TFS_E2E"])
+        logging.info("Requests sent to Teraflow E2E")
+        
+    return response
\ No newline at end of file
diff --git a/src/helpers.py b/src/realizer/tfs/helpers/cisco_connector.py
similarity index 56%
rename from src/helpers.py
rename to src/realizer/tfs/helpers/cisco_connector.py
index 0e150791ac742c02c03aaa755c04a980481b4336..48120069234d6e04f722dfa48443a0ace2051b3c 100644
--- a/src/helpers.py
+++ b/src/realizer/tfs/helpers/cisco_connector.py
@@ -12,87 +12,55 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This file includes original contributions from Telefonica Innovación Digital S.L.
+# This file is an original contribution from Telefonica Innovación Digital S.L.
 
-import logging, requests, json
+import logging
 from netmiko import ConnectHandler
-from src.Constants import DEFAULT_LOGGING_LEVEL
 
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
-    level=DEFAULT_LOGGING_LEVEL,
-    format='%(levelname)s - %(message)s')
-
-#Teraflow
-class tfs_connector():
-    
-    def webui_post(self, tfs_ip, service):
-        user="admin"
-        password="admin"
-        token=""
-        session = requests.Session()
-        session.auth = (user, password)
-        url=f'http://{tfs_ip}/webui'
-        response=session.get(url=url)
-        for item in response.iter_lines():
-            if("csrf_token" in str(item)):
-                string=str(item).split(' requests.Response:
+        """
+        Delete service from TFS NBI.
+        Args:
+            tfs_ip (str): IP address of the TFS instance
+            service_type (str): Type of the service ('L2' or 'L3')
+            service_id (str): Unique identifier of the service to delete
+        Returns:
+            requests.Response: Response object from the DELETE request
+        """
+        user="admin"
+        password="admin"
+        url = f'http://{user}:{password}@{tfs_ip}'
+        if service_type == 'L2':
+            url = url + f'/{NBI_L2_PATH}/vpn-service={service_id}'
+        elif service_type == 'L3':
+            url = url + f'/{NBI_L3_PATH}/vpn-service={service_id}'
+        else:
+            raise ValueError("Invalid service type. Use 'L2' or 'L3'.")
+        response = requests.delete(url, timeout=60)
+        response.raise_for_status()
+        logging.debug('Service deleted successfully')
+        logging.debug("Http response: %s",response.text)
+        return response
\ No newline at end of file
diff --git a/src/realizer/tfs/main.py b/src/realizer/tfs/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..74be42ab9990c89a929d7b4157e8c2ca9f4e4a9e
--- /dev/null
+++ b/src/realizer/tfs/main.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging
+from .service_types.tfs_l2vpn import tfs_l2vpn
+from .service_types.tfs_l3vpn import tfs_l3vpn
+
+def tfs(ietf_intent, way=None, response=None):
+    """
+    Generates a TFS realizing request based on the specified way (L2 or L3).
+
+    Args:
+        ietf_intent (dict): The IETF intent to be realized. Defaults to None.
+        way (str): The type of service to realize ("L2" or "L3"). Defaults to None.
+        response (dict): Response built for user feedback. Defaults to None.
+        
+    Returns:
+        dict: A realization request for the specified network slice type.
+    """
+    if way == "L2":
+        realizing_request = tfs_l2vpn(ietf_intent, response)
+    elif way == "L3":
+        realizing_request = tfs_l3vpn(ietf_intent, response)
+    else:
+        logging.warning(f"Unsupported way: {way}. Defaulting to L2 realization.")
+        realizing_request = tfs_l2vpn(ietf_intent, response)
+    return realizing_request
\ No newline at end of file
diff --git a/src/realizer/tfs/service_types/tfs_l2vpn.py b/src/realizer/tfs/service_types/tfs_l2vpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cf5af9af911f8483278dde743ff752ee8dfd743
--- /dev/null
+++ b/src/realizer/tfs/service_types/tfs_l2vpn.py
@@ -0,0 +1,186 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH, NBI_L2_PATH
+from src.utils.load_template import load_template
+from src.utils.safe_get import safe_get
+from ..helpers.cisco_connector import cisco_connector
+from flask import current_app
+
+def tfs_l2vpn(ietf_intent, response):
+    """
+    Translate slice intent into a TeraFlow service request.
+
+    This method prepares a L2VPN service request by:
+    1. Defining endpoint routers
+    2. Loading a service template
+    3. Generating a unique service UUID
+    4. Configuring service endpoints
+    5. Adding QoS constraints
+    6. Preparing configuration rules for network interfaces
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        response (dict): Response data containing slice information.
+
+    Returns:
+        dict: A TeraFlow service request for L2VPN configuration.
+
+    """
+    # Hardcoded router endpoints
+    # TODO (should be dynamically determined)
+    origin_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 0, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not origin_router_id:
+        logging.warning("Origin router ID not found in the intent. Skipping L2VPN realization.")
+        return None
+    origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    destination_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 1, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not destination_router_id:
+        logging.warning("Destination router ID not found in the intent. Skipping L2VPN realization.")
+        return None
+    destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+    slice = next((d for d in response if d.get("id") == id), None)
+
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        # Load L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "L2-VPN_template_empty.json"))["services"][0]
+
+        # Configure service UUID
+        tfs_request["service_id"]["service_uuid"]["uuid"] = ietf_intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]["id"]
+
+        # Configure service endpoints
+        for endpoint in tfs_request["service_endpoint_ids"]:
+            endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
+            endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
+
+        # Add service constraints
+        for constraint in slice.get("requirements", []):
+            tfs_request["service_constraints"].append({"custom": constraint})
+
+        # Add configuration rules
+        for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
+            router_id = origin_router_id if i == 1 else destination_router_id
+            router_if = origin_router_if if i == 1 else destination_router_if
+            resource_value = config_rule["custom"]["resource_value"]
+
+            sdp_index = i - 1
+            vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
+            if vlan_value:
+                resource_value["vlan_id"] = int(vlan_value)
+            resource_value["circuit_id"] = vlan_value
+            resource_value["remote_router"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
+            config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
+
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        #self.path = NBI_L2_PATH
+        # Load IETF L2VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "ietfL2VPN_template_empty.json"))
+        
+        # Add path to the request
+        tfs_request["path"] = NBI_L2_PATH
+
+        # Generate service UUID
+        full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        uuid_only = full_id.split("slice-service-")[-1]
+        tfs_request["ietf-l2vpn-svc:vpn-service"][0]["vpn-id"] = uuid_only
+
+        # Configure service endpoints
+        sites = tfs_request["ietf-l2vpn-svc:vpn-service"][0]["site"]
+        sdps = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"]
+
+        for i, site in enumerate(sites):
+            is_origin = (i == 0)
+            router_id = origin_router_id if is_origin else destination_router_id
+            sdp = sdps[0] if is_origin else sdps[1]
+            site["site-id"] = router_id
+            site["site-location"] = sdp["node-id"]
+            site["site-network-access"]["interface"]["ip-address"] = sdp["sdp-ip-address"]
+
+    logging.info(f"L2VPN Intent realized")
+    return tfs_request
+
+def tfs_l2vpn_support(requests):
+    """
+    Configuration support for L2VPN with path selection based on MPLS traffic-engineering tunnels
+
+    Args:
+        requests (list): A list of configuration parameters.
+
+    """
+    sources={
+        "source": "10.60.125.44",
+        "config":[]
+    }
+    destinations={
+        "destination": "10.60.125.45",
+        "config":[]
+    }
+    for request in requests:
+        # Configure Source Endpoint
+        temp_source = request["service_config"]["config_rules"][1]["custom"]["resource_value"]
+        endpoints = request["service_endpoint_ids"]
+        config = {
+            "ni_name": temp_source["ni_name"],
+            "remote_router": temp_source["remote_router"],
+            "interface": endpoints[0]["endpoint_uuid"]["uuid"].replace("0/0/0-", ""),
+            "vlan" : temp_source["vlan_id"],
+            "number" : temp_source["vlan_id"] % 10 + 1
+        }
+        sources["config"].append(config)
+
+        # Configure Destination Endpoint
+        temp_destiny = request["service_config"]["config_rules"][2]["custom"]["resource_value"]
+        config = {
+            "ni_name": temp_destiny["ni_name"],
+            "remote_router": temp_destiny["remote_router"],
+            "interface": endpoints[1]["endpoint_uuid"]["uuid"].replace("0/0/3-", ""),
+            "vlan" : temp_destiny["vlan_id"],
+            "number" : temp_destiny["vlan_id"] % 10 + 1
+        }
+        destinations["config"].append(config)
+        
+    #cisco_source = cisco_connector(source_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_source = cisco_connector(sources["source"], sources["config"])
+    commands = cisco_source.full_create_command_template()
+    cisco_source.execute_commands(commands)
+
+    #cisco_destiny = cisco_connector(destination_address, ni_name, remote_router, vlan, vlan % 10 + 1)
+    cisco_destiny = cisco_connector(destinations["destination"], destinations["config"])
+    commands = cisco_destiny.full_create_command_template()
+    cisco_destiny.execute_commands(commands)
+
+def tfs_l2vpn_delete():
+    """
+    Delete L2VPN configurations from Cisco devices.
+
+    This method removes L2VPN configurations from Cisco routers
+
+    Notes:
+        - Uses cisco_connector to generate and execute deletion commands
+        - Clears Network Interface (NI) settings
+    """
+    # Delete Source Endpoint Configuration
+    source_address = "10.60.125.44"
+    cisco_source = cisco_connector(source_address)
+    cisco_source.execute_commands(cisco_source.create_command_template_delete())
+
+    # Delete Destination Endpoint Configuration
+    destination_address = "10.60.125.45"
+    cisco_destiny = cisco_connector(destination_address)
+    cisco_destiny.execute_commands(cisco_destiny.create_command_template_delete())
\ No newline at end of file
diff --git a/src/realizer/tfs/service_types/tfs_l3vpn.py b/src/realizer/tfs/service_types/tfs_l3vpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..93befbca333b7a63ad477ae5b951e5ced486e776
--- /dev/null
+++ b/src/realizer/tfs/service_types/tfs_l3vpn.py
@@ -0,0 +1,141 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+
+import logging, os
+from src.config.constants import TEMPLATES_PATH, NBI_L3_PATH
+from src.utils.load_template import load_template
+from src.utils.safe_get import safe_get
+from flask import current_app
+
+def tfs_l3vpn(ietf_intent, response):
+    """
+    Translate L3VPN (Layer 3 Virtual Private Network) intent into a TeraFlow service request.
+
+    Similar to __tfs_l2vpn, but configured for Layer 3 VPN:
+    1. Defines endpoint routers
+    2. Loads service template
+    3. Generates unique service UUID
+    4. Configures service endpoints
+    5. Adds QoS constraints
+    6. Prepares configuration rules for network interfaces
+
+    Args:
+        ietf_intent (dict): IETF-formatted network slice intent.
+        response (dict): Response data containing slice information.
+
+    Returns:
+        dict: A TeraFlow service request for L3VPN configuration.
+    """
+    # Hardcoded router endpoints
+    # TODO (should be dynamically determined)
+    origin_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 0, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not origin_router_id:
+        logging.warning("Origin router ID not found in the intent. Skipping L3VPN realization.")
+        return None
+    origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    destination_router_id = safe_get(ietf_intent, ["ietf-network-slice-service:network-slice-services", "slice-service", 0, "sdps", "sdp", 1, "attachment-circuits", "attachment-circuit", 0, "sdp-peering", "peer-sap-id"])
+    if not destination_router_id:
+        logging.warning("Destination router ID not found in the intent. Skipping L3VPN realization.")
+        return None
+    destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
+    id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+    slice = next((d for d in response if d.get("id") == id), None)
+
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        # Load L3VPN service template
+        tfs_request = load_template(os.path.join(TEMPLATES_PATH, "L3-VPN_template_empty.json"))["services"][0]
+        # Configure service UUID
+        tfs_request["service_id"]["service_uuid"]["uuid"] = ietf_intent['ietf-network-slice-service:network-slice-services']['slice-service'][0]["id"]
+
+        # Configure service endpoints
+        for endpoint in tfs_request["service_endpoint_ids"]:
+            endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
+            endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
+
+        # Add service constraints
+        for constraint in slice.get("requirements", []):
+            tfs_request["service_constraints"].append({"custom": constraint})
+
+        # Add configuration rules
+        for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
+            router_id = origin_router_id if i == 1 else destination_router_id
+            router_if = origin_router_if if i == 1 else destination_router_if
+            resource_value = config_rule["custom"]["resource_value"]
+
+            sdp_index = i - 1
+            vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
+            resource_value["router_id"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["vlan_id"] = int(vlan_value)
+            resource_value["address_ip"] = destination_router_id if i == 1 else origin_router_id
+            resource_value["policy_AZ"] = "policyA"
+            resource_value["policy_ZA"] = "policyB"
+            resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
+            config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
+    
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        #self.path = NBI_L3_PATH
+
+        # Load IETF L3VPN service template
+        tfs_request =  load_template(os.path.join(TEMPLATES_PATH, "ietfL3VPN_template_empty.json"))
+
+        # Add path to the request
+        tfs_request["path"] = NBI_L3_PATH
+
+        # Generate service UUID
+        full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+        tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["vpn-services"]["vpn-service"][0]["vpn-id"] = full_id
+        # Configure service endpoints
+        for i, site in enumerate(tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["sites"]["site"]):
+
+            # Determine if origin or destination
+            is_origin = (i == 0)
+            sdp_index = 0 if is_origin else 1
+            location = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["node-id"]
+            router_id = origin_router_id if is_origin else destination_router_id
+            router_if = origin_router_if if is_origin else destination_router_if
+
+            # Assign common values
+            site["site-id"] = f"site_{location}"
+            site["locations"]["location"][0]["location-id"] = location
+            site["devices"]["device"][0]["device-id"] = router_id
+            site["devices"]["device"][0]["location"] = location
+
+            access = site["site-network-accesses"]["site-network-access"][0]
+            access["site-network-access-id"] = router_if
+            access["device-reference"] = router_id
+            access["vpn-attachment"]["vpn-id"] = full_id
+
+            # Aplicar restricciones QoS
+            for constraint in slice.get("requirements", []):
+                ctype = constraint["constraint_type"]
+                cvalue = float(constraint["constraint_value"])
+                if constraint["constraint_type"].startswith("one-way-bandwidth"):
+                        unit = constraint["constraint_type"].split("[")[-1].rstrip("]")
+                        multiplier = {"bps": 1, "kbps": 1_000, "Mbps": 1_000_000, "Gbps": 1_000_000_000}.get(unit, 1)
+                        value = int(cvalue * multiplier)
+                        access["service"]["svc-input-bandwidth"] = value
+                        access["service"]["svc-output-bandwidth"] = value
+                elif ctype == "one-way-delay-maximum[milliseconds]":
+                    access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["latency"]["latency-boundary"] = int(cvalue)
+                elif ctype == "availability[%]":
+                    access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["bandwidth"]["guaranteed-bw-percent"] = int(cvalue)
+                elif ctype == "mtu[bytes]":
+                    access["service"]["svc-mtu"] = int(cvalue)
+
+    
+    logging.info(f"L3VPN Intent realized")
+    #self.answer[self.subnet]["VLAN"] = vlan_value
+    return tfs_request
\ No newline at end of file
diff --git a/src/realizer/tfs/tfs_connect.py b/src/realizer/tfs/tfs_connect.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c8334dff5d5cc82d3feaa037dce6f1a7da5ccfd
--- /dev/null
+++ b/src/realizer/tfs/tfs_connect.py
@@ -0,0 +1,48 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from .helpers.tfs_connector import tfs_connector
+from flask import current_app
+from src.utils.send_response import send_response
+from .service_types.tfs_l2vpn import tfs_l2vpn_support
+
+def tfs_connect(requests, tfs_ip):
+    """
+    Connect to TeraflowSDN (TFS) controller and upload services.
+    
+    Args:
+        requests (dict): Dictionary containing services to upload
+        tfs_ip (str): IP address of the TFS controller
+    
+    Returns:
+        response (requests.Response): Response from TFS controller
+    """       
+    if current_app.config["UPLOAD_TYPE"] == "WEBUI":
+        response = tfs_connector().webui_post(tfs_ip, requests)
+    elif current_app.config["UPLOAD_TYPE"] == "NBI":
+        for intent in requests["services"]:
+            # Send each separate NBI request
+            path = intent.pop("path")
+            response = tfs_connector().nbi_post(tfs_ip, intent, path)
+
+            if not response.ok:
+                return send_response(False, code=response.status_code, message=f"Teraflow upload failed. Response: {response.text}")
+    
+    # For deploying an L2VPN with path selection (not supported by Teraflow)
+    if current_app.config["TFS_L2VPN_SUPPORT"]:
+        tfs_l2vpn_support(requests["services"])
+    
+    return response
\ No newline at end of file
diff --git a/src/slice_ddbb.json b/src/slice_ddbb.json
deleted file mode 100644
index 0637a088a01e8ddab3bf3fa98dbe804cbde1a0dc..0000000000000000000000000000000000000000
--- a/src/slice_ddbb.json
+++ /dev/null
@@ -1 +0,0 @@
-[]
\ No newline at end of file
diff --git a/src/templates/IPoWDM_orchestrator.json b/src/templates/IPoWDM_orchestrator.json
new file mode 100644
index 0000000000000000000000000000000000000000..60eb0dbaaebca3e0a35d5b9007121119a60443f8
--- /dev/null
+++ b/src/templates/IPoWDM_orchestrator.json
@@ -0,0 +1,31 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}},
+                "service_uuid": {"uuid": "TAPI LSP"}
+            },
+            "service_type": 12,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}},"endpoint_uuid": {"uuid": "mgmt"}},
+                {"device_id": {"device_uuid": {"uuid": "TFS-PACKET"}},"endpoint_uuid": {"uuid": "mgmt"}}
+
+            ],
+            "service_constraints": [],
+
+            "service_config": {"config_rules": [
+                {"action": 1, "ipowdm": {
+                    "endpoint_id": {
+                        "device_id": {"device_uuid": {"uuid": "TFS-PACKET"}},
+                        "endpoint_uuid": {"uuid": "mgmt"}
+                    },
+                    "rule_set": {
+                        "src"  : [],
+                        "dst"  : []
+                    }
+                }}
+            ]}
+        }
+    ]
+}
\ No newline at end of file
diff --git a/src/templates/Optical_slice.json b/src/templates/Optical_slice.json
new file mode 100644
index 0000000000000000000000000000000000000000..94c87fe03f52b7a10acfc537eb5dbabfc6b4a46b
--- /dev/null
+++ b/src/templates/Optical_slice.json
@@ -0,0 +1,28 @@
+{
+   "tapi-common:context" : {
+      "name" : [
+         {
+            "value" : ""
+         }
+      ],
+      "service-interface-point" : [
+         {
+            "uuid" : ""
+         },
+         {
+            "uuid" : ""
+         }
+      ],
+      "tapi-topology:topology-context" : {
+         "topology" : [
+            {
+               "link" : [
+               ],
+               "node" : [
+               ]
+            }
+         ]
+      },
+      "uuid" : ""
+   }
+}
diff --git a/src/templates/TAPI_service.json b/src/templates/TAPI_service.json
new file mode 100644
index 0000000000000000000000000000000000000000..1b09a04cb809d0d959340e6d23a15d65f7f372b0
--- /dev/null
+++ b/src/templates/TAPI_service.json
@@ -0,0 +1,43 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}},
+                "service_uuid": {"uuid": "TAPI LSP"}
+            },
+            "service_type": 11,
+            "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}},"endpoint_uuid": {"uuid": "mgmt"}},
+                {"device_id": {"device_uuid": {"uuid": "TFS-PACKET"}},"endpoint_uuid": {"uuid": "mgmt"}}
+
+            ],
+            "service_constraints": [],
+
+            "service_config": {"config_rules": [
+                {"action": 1, "tapi_lsp": {
+                    "endpoint_id": {
+                        "device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}},
+                        "endpoint_uuid": {"uuid": "mgmt"}
+                    },
+                    "rule_set": {
+                        "src": "",
+                        "dst": "",
+                        "uuid": "",
+                        "bw": "",
+                        "tenant_uuid": "",
+                        "direction": "",
+                        "layer_protocol_name": "",
+                        "layer_protocol_qualifier": "",
+                        "lower_frequency_mhz": "",
+                        "upper_frequency_mhz": "",
+                        "link_uuid_path": [
+                        ],
+                        "granularity": "",
+                        "grid_type": ""
+                    }
+                }}
+            ]}
+        }
+    ]
+}
\ No newline at end of file
diff --git a/src/templates/ietf_template_empty.json b/src/templates/ietf_template_empty.json
index cdaf66cdad3fbd7f01c09a7987cf8729600952b0..c484a4a2991e0b34ce691ee14125c2fddb00fa41 100644
--- a/src/templates/ietf_template_empty.json
+++ b/src/templates/ietf_template_empty.json
@@ -29,10 +29,14 @@
            "id":"5GSliceMapping",
            "description":"example 5G Slice mapping",
            "service-tags":{
-              "tag-type":{
-                 "tag-type":"",
-                 "value":""
+            "tag-type": [
+              {
+                "tag-type": "",
+                "tag-type-value": [
+                  ""
+                ]
               }
+            ]
            },
            "slo-sle-policy":{
               "slo-sle-template":""
diff --git a/src/tests/requests/3ggpp_template_green.json b/src/tests/requests/3ggpp_template_green.json
new file mode 100644
index 0000000000000000000000000000000000000000..67a1367b093b84c1dd589c803cee55cb130ce232
--- /dev/null
+++ b/src/tests/requests/3ggpp_template_green.json
@@ -0,0 +1,176 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "CNSliceSubnet1",
+      "RANSliceSubnet1"
+    ]
+  },
+  "CNSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "CN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "CNId",
+        "pLMNInfoList": null,
+        "CNSliceSubnetProfile": {
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+            "dLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "dLLatency": 8,
+          "uLLatency": 8,
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "MidhaulSliceSubnet1"
+    ]
+  },
+  "MidhaulSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "EnergyEfficiency": 5,
+          "EnergyConsumption": 18000,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 650
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-UP1",
+      "EpTransport DU3"
+    ]
+  },
+  "BackhaulSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+            "dLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 40,
+            "MaxThpt": 80
+          },
+          "dLLatency": 8,
+          "uLLatency": 8,
+          "EnergyEfficiency": 400,
+          "EnergyConsumption": 200,
+          "RenewableEnergyUsage": 0.5,
+          "CarbonEmissions": 100
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-UP2",
+      "EpTransport UPF"
+    ]
+  },
+  "EpTransport CU-UP1": {
+    "IpAddress": "1.1.1.100",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "300"
+    },
+    "NextHopInfo": "1.1.1.1",
+    "qosProfile": "5QI100",
+    "EpApplicationRef": [
+      "EP_F1U CU-UP1"
+    ]
+  },
+  "EP_F1U CU-UP1": {
+    "localAddress": "100.1.1.100",
+    "remoteAddress": "200.1.1.100",
+    "epTransportRef": [
+      "EpTransport CU-UP1"
+    ]
+  },
+  "EpTransport DU3": {
+    "IpAddress": "2.2.2.100",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "300"
+    },
+    "NextHopInfo": "2.2.2.2",
+    "qosProfile": "5QI100",
+    "EpApplicationRef": [
+      "EP_F1U DU3"
+    ]
+  },
+  "EP_F1U DU3": {
+    "localAddress": "200.1.1.100",
+    "remoteAddress": "100.1.1.100",
+    "epTransportRef": [
+      "EpTransport DU3"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed80ec01d699a6bea39f99b19af1e22550c1c851
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json
@@ -0,0 +1,267 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN2",
+      "BackhaulSliceSubnetN31",
+      "BackhaulSliceSubnetN32"
+    ]
+  },
+  "BackhaulSliceSubnetN2": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N2",
+      "EpTransport AMF-N2"
+    ]
+  },
+  "BackhaulSliceSubnetN31": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 50,
+            "MaxThpt": 100
+          },
+          "dLLatency": 10,
+          "uLLatency": 10
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N31",
+      "EpTransport UPF-N31"
+    ]
+  },
+  "BackhaulSliceSubnetN32": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 5,
+          "uLLatency": 5
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N32",
+      "EpTransport UPF-N32"
+    ]
+  },
+  "EpTransport CU-N2": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 CU-N2"
+    ]
+  },
+  "EP_N2 CU-N2": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.105",
+    "epTransportRef": [
+      "EpTransport CU-N2"
+    ]
+  },
+  "EpTransport AMF-N2": {
+    "IpAddress": "10.60.60.105",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 AMF-N2"
+    ]
+  },
+  "EP_N2 AMF-N2": {
+    "localAddress": "10.60.60.105",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N2"
+    ]
+  },
+  "EpTransport CU-N32": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 CU-N32"
+    ]
+  },
+  "EP_N3 CU-N32": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.10.6",
+    "epTransportRef": [
+      "EpTransport CU-N32"
+    ]
+  },
+  "EpTransport UPF-N32": {
+    "IpAddress": "10.60.10.6",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N32"
+    ]
+  },
+  "EP_N3 UPF-N32": {
+    "localAddress": "10.60.10.6",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N32"
+    ]
+  },
+  "EpTransport CU-N31": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 CU-N31"
+    ]
+  },
+  "EP_N3 CU-N31": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.106",
+    "epTransportRef": [
+      "EpTransport CU-N31"
+    ]
+  },
+  "EpTransport UPF-N31": {
+    "IpAddress": "10.60.60.106",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N31"
+    ]
+  },
+  "EP_N3 UPF-N31": {
+    "localAddress": "10.60.60.106",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N31"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json
new file mode 100644
index 0000000000000000000000000000000000000000..9dab29465a9d992e795a50f1a063bbb5ca05104e
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json
@@ -0,0 +1,131 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN2"
+    ]
+  },
+  "BackhaulSliceSubnetN2": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 1,
+            "MaxThpt": 2
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 1,
+            "MaxThpt": 2
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N2",
+      "EpTransport AMF-N2"
+    ]
+  },
+  "EpTransport CU-N2": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 CU-N2"
+    ]
+  },
+  "EP_N2 CU-N2": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.105",
+    "epTransportRef": [
+      "EpTransport CU-N2"
+    ]
+  },
+  "EpTransport AMF-N2": {
+    "IpAddress": "10.60.60.105",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_N2 AMF-N2"
+    ]
+  },
+  "EP_N2 AMF-N2": {
+    "localAddress": "10.60.60.105",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N2"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json
new file mode 100644
index 0000000000000000000000000000000000000000..d287a04fbf1da5202daf1a71c98ff2c509535523
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json
@@ -0,0 +1,131 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN32"
+    ]
+  },
+  "BackhaulSliceSubnetN32": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 5,
+          "uLLatency": 5
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N32",
+      "EpTransport UPF-N32"
+    ]
+  },
+  "EpTransport CU-N32": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 CU-N32"
+    ]
+  },
+  "EP_N3 CU-N32": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.10.6",
+    "epTransportRef": [
+      "EpTransport CU-N32"
+    ]
+  },
+  "EpTransport UPF-N32": {
+    "IpAddress": "10.60.10.6",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N32"
+    ]
+  },
+  "EP_N3 UPF-N32": {
+    "localAddress": "10.60.10.6",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N32"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json
new file mode 100644
index 0000000000000000000000000000000000000000..55232e8eb5a17bfa803995f0751933c82e1df9ec
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json
@@ -0,0 +1,131 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 310,
+            "MaxThpt": 620
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 160,
+            "MaxThpt": 320
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "BackhaulSliceSubnetN31"
+    ]
+  },
+  "BackhaulSliceSubnetN31": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "BackhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 50,
+            "MaxThpt": 100
+          },
+          "dLLatency": 10,
+          "uLLatency": 10
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-N31",
+      "EpTransport UPF-N31"
+    ]
+  },
+  "EpTransport CU-N31": {
+    "IpAddress": "10.60.11.3",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 CU-N31"
+    ]
+  },
+  "EP_N3 CU-N31": {
+    "localAddress": "10.60.11.3",
+    "remoteAddress": "10.60.60.106",
+    "epTransportRef": [
+      "EpTransport CU-N31"
+    ]
+  },
+  "EpTransport UPF-N31": {
+    "IpAddress": "10.60.60.106",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_N3 UPF-N31"
+    ]
+  },
+  "EP_N3 UPF-N31": {
+    "localAddress": "10.60.60.106",
+    "remoteAddress": "10.60.11.3",
+    "epTransportRef": [
+      "EpTransport UPF-N31"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json b/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json
new file mode 100644
index 0000000000000000000000000000000000000000..300f8666fbb20d501052194393ab5232d22d3424
--- /dev/null
+++ b/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json
@@ -0,0 +1,267 @@
+{
+  "NetworkSlice1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "serviceProfileList": [],
+    "networkSliceSubnetRef": "TopSliceSubnet1"
+  },
+  "TopSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "TOP_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "TopId",
+        "pLMNInfoList": null,
+        "TopSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 410,
+            "MaxThpt": 820
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 210,
+            "MaxThpt": 420
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "RANSliceSubnet1"
+    ]
+  },
+  "RANSliceSubnet1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "RANId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 410,
+            "MaxThpt": 820
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 210,
+            "MaxThpt": 220
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "networkSliceSubnetRef": [
+      "MidhaulSliceSubnetF1c",
+      "MidhaulSliceSubnetF1u1",
+      "MidhaulSliceSubnetF1u2"
+    ]
+  },
+  "MidhaulSliceSubnetF1c": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 10,
+            "MaxThpt": 20
+          },
+          "dLLatency": 20,
+          "uLLatency": 20
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-F1c",
+      "EpTransport DU-F1c"
+    ]
+  },
+  "MidhaulSliceSubnetF1u1": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 5,
+          "uLLatency": 5
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-F1u1",
+      "EpTransport DU-F1u1"
+    ]
+  },
+  "MidhaulSliceSubnetF1u2": {
+    "operationalState": "",
+    "administrativeState": "",
+    "nsInfo": {},
+    "managedFunctionRef": [],
+    "networkSliceSubnetType": "RAN_SLICESUBNET",
+    "SliceProfileList": [
+      {
+        "sliceProfileId": "MidhaulId",
+        "pLMNInfoList": null,
+        "RANSliceSubnetProfile": {
+          "dLThptPerSliceSubnet": {
+            "GuaThpt": 200,
+            "MaxThpt": 400
+          },
+          "uLThptPerSliceSubnet": {
+            "GuaThpt": 100,
+            "MaxThpt": 200
+          },
+          "dLLatency": 10,
+          "uLLatency": 10
+        }
+      }
+    ],
+    "EpTransport": [
+      "EpTransport CU-F1u2",
+      "EpTransport DU-F1u2"
+    ]
+  },
+  "EpTransport CU-F1c": {
+    "IpAddress": "10.60.10.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_F1C CU-F1c"
+    ]
+  },
+  "EP_F1C CU-F1c": {
+    "localAddress": "10.60.10.2",
+    "remoteAddress": "10.60.11.2",
+    "epTransportRef": [
+      "EpTransport CU-F1c"
+    ]
+  },
+  "EpTransport DU-F1c": {
+    "IpAddress": "10.60.11.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "100"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "A",
+    "EpApplicationRef": [
+      "EP_F1C DU-F1c"
+    ]
+  },
+  "EP_F1C DU-F1c": {
+    "localAddress": "10.60.11.2",
+    "remoteAddress": "10.60.10.2",
+    "epTransportRef": [
+      "EpTransport DU-F1c"
+    ]
+  },
+  "EpTransport CU-F1u1": {
+    "IpAddress": "10.60.10.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_F1U CU-F1u1"
+    ]
+  },
+  "EP_F1U CU-F1u1": {
+    "localAddress": "10.60.10.2",
+    "remoteAddress": "10.60.11.2",
+    "epTransportRef": [
+      "EpTransport CU-F1c"
+    ]
+  },
+  "EpTransport DU-F1u1": {
+    "IpAddress": "10.60.11.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "101"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "B",
+    "EpApplicationRef": [
+      "EP_F1U DU-F1u1"
+    ]
+  },
+  "EP_F1U DU-F1u1": {
+    "localAddress": "10.60.11.2",
+    "remoteAddress": "10.60.10.2",
+    "epTransportRef": [
+      "EpTransport DU-F1u1"
+    ]
+  },
+  "EpTransport CU-F1u2": {
+    "IpAddress": "10.60.10.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "5.5.5.5",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_F1U CU-F1u2"
+    ]
+  },
+  "EP_F1U CU-F1u2": {
+    "localAddress": "10.60.10.2",
+    "remoteAddress": "10.60.11.2",
+    "epTransportRef": [
+      "EpTransport CU-F1u2"
+    ]
+  },
+  "EpTransport DU-F1u2": {
+    "IpAddress": "10.60.11.2",
+    "logicalInterfaceInfo": {
+      "logicalInterfaceType": "VLAN",
+      "logicalInterfaceId": "102"
+    },
+    "NextHopInfo": "4.4.4.4",
+    "qosProfile": "C",
+    "EpApplicationRef": [
+      "EP_F1U DU-F1u2"
+    ]
+  },
+  "EP_F1U DU-F1u2": {
+    "localAddress": "10.60.11.2",
+    "remoteAddress": "10.60.10.2",
+    "epTransportRef": [
+      "EpTransport DU-F1u2"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/P2MP.json b/src/tests/requests/P2MP.json
new file mode 100644
index 0000000000000000000000000000000000000000..02875dcf74a61c00b953525b93814ccd1167891a
--- /dev/null
+++ b/src/tests/requests/P2MP.json
@@ -0,0 +1,108 @@
+{
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {
+                        "id": "LOW-DELAY",
+                        "description": "Prefer direct link: delay <= 2ms",
+                        "slo-policy": {
+                            "metric-bound": [
+                                {
+                                    "metric-type": "two-way-delay-maximum",
+                                    "metric-unit": "milliseconds",
+                                    "bound": 2
+                                }
+                            ]
+                        }
+                    }
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "slice-long",
+                    "description": "Slice tolerant to intermediate hops",
+                    "slo-sle-policy": {
+                        "slo-sle-template": "LOW-DELAY"
+                    },
+                    "sdps": {
+                        "sdp": [
+                            {
+                                "id": "T1.2",
+                                "node-id": "T1.2",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r1",
+                                            "ac-ipv4-address": "10.10.1.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            },
+                            {
+                                "id": "T1.1",
+                                "node-id": "T1.1",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r2",
+                                            "ac-ipv4-address": "10.10.2.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            },
+                            {
+                                "id": "T2.1",
+                                "node-id": "T2.1",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r3",
+                                            "ac-ipv4-address": "10.10.3.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            },
+                            {
+                                "id": "T1.3",
+                                "node-id": "T1.3",
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "id": "ac-r3",
+                                            "ac-ipv4-address": "10.10.4.1",
+                                            "ac-ipv4-prefix-length": 24
+                                        }
+                                    ]
+                                }
+                            }
+                        ]
+                    },
+                    "connection-groups": {
+                        "connection-group": [
+                            {
+                                "id": "cg-long",
+                                "connectivity-type": "ietf-vpn-common:any-to-any",
+                                "connectivity-construct": [
+                                    {
+                                        "id": "cc-p2mp",
+                                        "p2mp-sdp": {
+                                            "root-sdp-id": "T2.1",
+                                            "leaf-sdp-id": [
+                                                "T1.1",
+                                                "T1.2",
+                                                "T1.3"
+
+                                            ]
+                                        }
+                                    }
+                                ]
+                            }
+                        ]
+                    }
+                }
+            ]
+        }
+    }
\ No newline at end of file
diff --git a/src/tests/requests/create_slice_1.json b/src/tests/requests/create_slice_1.json
new file mode 100644
index 0000000000000000000000000000000000000000..bcefe01525f7413a98f89c8fa5266b7c6cec8329
--- /dev/null
+++ b/src/tests/requests/create_slice_1.json
@@ -0,0 +1,81 @@
+{
+    "ietf-network-slice-service:network-slice-services": {
+        "slo-sle-templates": {
+            "slo-sle-template": [
+                {
+                    "id": "LOW-DELAY",
+                    "description": "optical-slice",
+                    "slo-policy": {
+                        "metric-bound": [
+                            {
+                                "metric-type": "two-way-delay-maximum",
+                                "metric-unit": "milliseconds",
+                                "bound": 2
+                            }
+                        ]
+                    }
+                }
+            ]
+        },
+        "slice-service": [
+            {
+                "id": "slice-long",
+                "description": "Slice tolerant to intermediate hops",
+                "slo-sle-policy": {
+                    "slo-sle-template": "LOW-DELAY"
+                },
+                "sdps": {
+                    "sdp": [
+                        {
+                            "id": "Ethernet110",
+                            "node-id": "Phoenix-1",
+                            "attachment-circuits": {
+                                "attachment-circuit": [
+                                    {
+                                        "id": "ac-r1",
+                                        "ac-ipv4-address": "10.10.1.1",
+                                        "ac-ipv4-prefix-length": 24
+                                    }
+                                ]
+                            }
+                        },
+                        {
+                            "id": "Ethernet220",
+                            "node-id": "Phoenix-2",
+                            "attachment-circuits": {
+                                "attachment-circuit": [
+                                    {
+                                        "id": "ac-r2",
+                                        "ac-ipv4-address": "10.10.2.1",
+                                        "ac-ipv4-prefix-length": 24
+                                    }
+                                ]
+                            }
+                        }
+                    ]
+                },
+                "connection-groups": {
+                    "connection-group": [
+                        {
+                            "id": "cg-long",
+                            "connectivity-type": "ietf-vpn-common:any-to-any",
+                            "connectivity-construct": [
+                                {
+                                    "id": "cc-long",
+                                    "a2a-sdp": [
+                                        {
+                                            "sdp-id": "Ethernet110"
+                                        },
+                                        {
+                                            "sdp-id": "Ethernet220"
+                                        }
+                                    ]
+                                }
+                            ]
+                        }
+                    ]
+                }
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/src/tests/requests/ietf_green_request.json b/src/tests/requests/ietf_green_request.json
new file mode 100644
index 0000000000000000000000000000000000000000..5edae753b82526b19b865cc8c834260d580679dd
--- /dev/null
+++ b/src/tests/requests/ietf_green_request.json
@@ -0,0 +1,172 @@
+{
+  "ietf-network-slice-service:network-slice-services": {
+    "slo-sle-templates": {
+      "slo-sle-template": [
+        {
+          "id": "B",
+          "description": "",
+          "slo-policy": {
+            "metric-bound": [
+              {
+                "metric-type": "energy_consumption",
+                "metric-unit": "kWh",
+                "bound": 20200
+              },
+              {
+                "metric-type": "energy_efficiency",
+                "metric-unit": "Wats/bps",
+                "bound": 6
+              },
+              {
+                "metric-type": "carbon_emission",
+                "metric-unit": "grams of CO2 per kWh",
+                "bound": 750
+              },
+              {
+                "metric-type": "renewable_energy_usage",
+                "metric-unit": "rate",
+                "bound": 0.5
+              }
+            ]
+          },
+          "sle-policy": {
+            "security": "",
+            "isolation": "",
+            "path-constraints": {
+              "service-functions": "",
+              "diversity": {
+                "diversity": {
+                  "diversity-type": ""
+                }
+              }
+            }
+          }
+        }
+      ]
+    },
+    "slice-service": [
+      {
+        "id": "slice-service-88a585f7-a432-4312-8774-6210fb0b2342",
+        "description": "Transport network slice mapped with 3GPP slice NetworkSlice1",
+        "service-tags": {
+            "tag-type": [
+              {
+                "tag-type": "service",
+                "tag-type-value": [
+                  "L2"
+                ]
+              }
+            ]
+        },
+        "slo-sle-policy": {
+          "slo-sle-template": "B"
+        },
+        "status": {},
+        "sdps": {
+          "sdp": [
+            {
+              "id": "A",
+              "geo-location": "",
+              "node-id": "CU-N32",
+              "sdp-ip-address": "10.60.11.3",
+              "tp-ref": "",
+              "service-match-criteria": {
+                "match-criterion": [
+                  {
+                    "index": 1,
+                    "match-type": "VLAN",
+                    "value": "101",
+                    "target-connection-group-id": "CU-N32_UPF-N32"
+                  }
+                ]
+              },
+              "incoming-qos-policy": "",
+              "outgoing-qos-policy": "",
+              "sdp-peering": {
+                "peer-sap-id": "",
+                "protocols": ""
+              },
+              "ac-svc-ref": [],
+              "attachment-circuits": {
+                "attachment-circuit": [
+                  {
+                    "id": "100",
+                    "ac-ipv4-address": "10.60.11.3",
+                    "ac-ipv4-prefix-length": 0,
+                    "sdp-peering": {
+                      "peer-sap-id": "4.4.4.4"
+                    },
+                    "status": {}
+                  }
+                ]
+              },
+              "status": {},
+              "sdp-monitoring": ""
+            },
+            {
+              "id": "B",
+              "geo-location": "",
+              "node-id": "UPF-N32",
+              "sdp-ip-address": "10.60.10.6",
+              "tp-ref": "",
+              "service-match-criteria": {
+                "match-criterion": [
+                  {
+                    "index": 1,
+                    "match-type": "VLAN",
+                    "value": "101",
+                    "target-connection-group-id": "CU-N32_UPF-N32"
+                  }
+                ]
+              },
+              "incoming-qos-policy": "",
+              "outgoing-qos-policy": "",
+              "sdp-peering": {
+                "peer-sap-id": "",
+                "protocols": ""
+              },
+              "ac-svc-ref": [],
+              "attachment-circuits": {
+                "attachment-circuit": [
+                  {
+                    "id": "200",
+                    "ac-ipv4-address": "10.60.10.6",
+                    "ac-ipv4-prefix-length": 0,
+                    "sdp-peering": {
+                      "peer-sap-id": "5.5.5.5"
+                    },
+                    "status": {}
+                  }
+                ]
+              },
+              "status": {},
+              "sdp-monitoring": ""
+            }
+          ]
+        },
+        "connection-groups": {
+          "connection-group": [
+            {
+              "id": "CU-N32_UPF-N32",
+              "connectivity-type": "ietf-vpn-common:any-to-any",
+              "connectivity-construct": [
+                {
+                  "id": 1,
+                  "a2a-sdp": [
+                    {
+                      "sdp-id": "A"
+                    },
+                    {
+                      "sdp-id": "B"
+                    }
+                  ]
+                }
+              ],
+              "status": {}
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/src/tests/requests/l3vpn_test.json b/src/tests/requests/l3vpn_test.json
new file mode 100644
index 0000000000000000000000000000000000000000..4564739ad6eea215a0a5b8892a597e110f706adc
--- /dev/null
+++ b/src/tests/requests/l3vpn_test.json
@@ -0,0 +1,164 @@
+{
+    "ietf-network-slice-service:network-slice-services": {
+      "slo-sle-templates": {
+        "slo-sle-template": [
+          {
+            "id": "A",
+            "description": "",
+            "slo-policy": {
+              "metric-bound": [
+                {
+                  "metric-type": "one-way-bandwidth",
+                  "metric-unit": "kbps",
+                  "bound": 20000000.67
+                },
+                {
+                  "metric-type": "one-way-delay-maximum",
+                  "metric-unit": "milliseconds",
+                  "bound": 5.5
+                }
+              ],
+              "availability": 95,
+              "mtu": 1450
+            },
+            "sle-policy": {
+              "security": "",
+              "isolation": "",
+              "path-constraints": {
+                "service-functions": "",
+                "diversity": {
+                  "diversity": {
+                    "diversity-type": ""
+                  }
+                }
+              }
+            }
+          }
+        ]
+      },
+      "slice-service": [
+        {
+          "id": "slice-service-91327140-7361-41b3-aa45-e84a7fb40b79",
+          "description": "Transport network slice mapped with 3GPP slice NetworkSlice1",
+          "service-tags": {
+             "tag-type": [
+               {
+                 "tag-type": "service",
+                 "tag-type-value": [
+                   "L3"
+                 ]
+               }
+             ]
+          },
+          "slo-sle-policy": {
+            "slo-sle-template": "A"
+          },
+          "status": {},
+          "sdps": {
+            "sdp": [
+              {
+                "id": "01",
+                "geo-location": "",
+                "node-id": "CU-N2",
+                "sdp-ip-address": "10.60.11.3",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "100",
+                      "ac-ipv4-address": "10.60.11.3",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "1.1.1.1"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              },
+              {
+                "id": "02",
+                "geo-location": "",
+                "node-id": "AMF-N2",
+                "sdp-ip-address": "10.60.60.105",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "200",
+                      "ac-ipv4-address": "10.60.60.105",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "3.3.3.3"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              }
+            ]
+          },
+          "connection-groups": {
+            "connection-group": [
+              {
+                "id": "CU-N2_AMF-N2",
+                "connectivity-type": "ietf-vpn-common:any-to-any",
+                "connectivity-construct": [
+                  {
+                    "id": 1,
+                    "a2a-sdp": [
+                      {
+                        "sdp-id": "01"
+                      },
+                      {
+                        "sdp-id": "02"
+                      }
+                    ]
+                  }
+                ],
+                "status": {}
+              }
+            ]
+          }
+        }
+      ]
+    }
+  }
\ No newline at end of file
diff --git a/src/tests/requests/slice_request.json b/src/tests/requests/slice_request.json
new file mode 100644
index 0000000000000000000000000000000000000000..f2150783ae098dc5e9511a986eb60c04f046282f
--- /dev/null
+++ b/src/tests/requests/slice_request.json
@@ -0,0 +1,162 @@
+{
+    "ietf-network-slice-service:network-slice-services": {
+      "slo-sle-templates": {
+        "slo-sle-template": [
+          {
+            "id": "A",
+            "description": "",
+            "slo-policy": {
+              "metric-bound": [
+                {
+                  "metric-type": "one-way-bandwidth",
+                  "metric-unit": "kbps",
+                  "bound": 2000
+                },
+                {
+                  "metric-type": "one-way-delay-maximum",
+                  "metric-unit": "milliseconds",
+                  "bound": 5
+                }
+              ]
+            },
+            "sle-policy": {
+              "security": "",
+              "isolation": "",
+              "path-constraints": {
+                "service-functions": "",
+                "diversity": {
+                  "diversity": {
+                    "diversity-type": ""
+                  }
+                }
+              }
+            }
+          }
+        ]
+      },
+      "slice-service": [
+        {
+          "id": "slice-service-11327140-7361-41b3-aa45-e84a7fb40be9",
+          "description": "Transport network slice mapped with 3GPP slice NetworkSlice1",
+          "service-tags": {
+             "tag-type": [
+               {
+                 "tag-type": "service",
+                 "tag-type-value": [
+                   "L2"
+                 ]
+               }
+             ]
+          },
+          "slo-sle-policy": {
+            "slo-sle-template": "A"
+          },
+          "status": {},
+          "sdps": {
+            "sdp": [
+              {
+                "id": "01",
+                "geo-location": "",
+                "node-id": "CU-N2",
+                "sdp-ip-address": "10.60.11.3",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "100",
+                      "ac-ipv4-address": "10.60.11.3",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "1.1.1.1"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              },
+              {
+                "id": "02",
+                "geo-location": "",
+                "node-id": "AMF-N2",
+                "sdp-ip-address": "10.60.60.105",
+                "tp-ref": "",
+                "service-match-criteria": {
+                  "match-criterion": [
+                    {
+                      "index": 1,
+                      "match-type": "VLAN",
+                      "value": "100",
+                      "target-connection-group-id": "CU-N2_AMF-N2"
+                    }
+                  ]
+                },
+                "incoming-qos-policy": "",
+                "outgoing-qos-policy": "",
+                "sdp-peering": {
+                  "peer-sap-id": "",
+                  "protocols": ""
+                },
+                "ac-svc-ref": [],
+                "attachment-circuits": {
+                  "attachment-circuit": [
+                    {
+                      "id": "200",
+                      "ac-ipv4-address": "10.60.60.105",
+                      "ac-ipv4-prefix-length": 0,
+                      "sdp-peering": {
+                        "peer-sap-id": "3.3.3.3"
+                      },
+                      "status": {}
+                    }
+                  ]
+                },
+                "status": {},
+                "sdp-monitoring": ""
+              }
+            ]
+          },
+          "connection-groups": {
+            "connection-group": [
+              {
+                "id": "CU-N2_AMF-N2",
+                "connectivity-type": "ietf-vpn-common:any-to-any",
+                "connectivity-construct": [
+                  {
+                    "id": 1,
+                    "a2a-sdp": [
+                      {
+                        "sdp-id": "01"
+                      },
+                      {
+                        "sdp-id": "02"
+                      }
+                    ]
+                  }
+                ],
+                "status": {}
+              }
+            ]
+          }
+        }
+      ]
+    }
+  }
\ No newline at end of file
diff --git a/src/tests/test_api.py b/src/tests/test_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..7264ab9b0ce115c8c318efda6e078334c6cb76b4
--- /dev/null
+++ b/src/tests/test_api.py
@@ -0,0 +1,301 @@
+import json
+import pytest
+import os
+from unittest.mock import patch, Mock, MagicMock
+from pathlib import Path
+from dotenv import load_dotenv
+import sqlite3
+import time
+from flask import Flask
+from src.main import NSController
+from src.api.main import Api
+
+
+# Load environment variables
+load_dotenv()
+
+@pytest.fixture(scope="session")
+def flask_app():
+    """Create a minimal Flask app for the tests."""
+    app = Flask(__name__)
+    app.config.update({
+        "TESTING": True,
+        "SERVER_NAME": "localhost",
+        'NRP_ENABLED': os.getenv('NRP_ENABLED', 'False').lower() == 'true',
+        'PLANNER_ENABLED': os.getenv('PLANNER_ENABLED', 'False').lower() == 'true',
+        'PCE_EXTERNAL': os.getenv('PCE_EXTERNAL', 'False').lower() == 'true',
+        'DUMMY_MODE': os.getenv('DUMMY_MODE', 'True').lower() == 'true',
+        'DUMP_TEMPLATES': os.getenv('DUMP_TEMPLATES', 'False').lower() == 'true',
+        'TFS_L2VPN_SUPPORT': os.getenv('TFS_L2VPN_SUPPORT', 'False').lower() == 'true',
+        'WEBUI_DEPLOY': os.getenv('WEBUI_DEPLOY', 'True').lower() == 'true',
+        'UPLOAD_TYPE': os.getenv('UPLOAD_TYPE', 'WEBUI'),
+        'PLANNER_TYPE': os.getenv('PLANNER_TYPE', 'ENERGY'),
+        'HRAT_IP' : os.getenv('HRAT_IP', '10.0.0.1'),
+        'OPTICAL_PLANNER_IP' : os.getenv('OPTICAL_PLANNER_IP', '10.0.0.1')
+    })
+    return app
+
+
+@pytest.fixture(autouse=True)
+def push_flask_context(flask_app):
+    """Automatically push a Flask app context for each test."""
+    with flask_app.app_context():
+        yield
+
+@pytest.fixture
+def temp_db(tmp_path):
+    """Fixture to create and cleanup test database using SQLite instead of JSON."""
+    test_db_name = str(tmp_path / "test_slice.db")
+    
+    # Create database with proper schema
+    conn = sqlite3.connect(test_db_name)
+    cursor = conn.cursor()
+    cursor.execute("""
+        CREATE TABLE IF NOT EXISTS slice (
+            slice_id TEXT PRIMARY KEY,
+            intent TEXT NOT NULL,
+            controller TEXT NOT NULL
+        )
+    """)
+    conn.commit()
+    conn.close()
+    
+    yield test_db_name
+    
+    # Cleanup - properly close connections and remove file
+    try:
+        time.sleep(0.1)
+        if os.path.exists(test_db_name):
+            os.remove(test_db_name)
+    except Exception:
+        time.sleep(0.5)
+        try:
+            if os.path.exists(test_db_name):
+                os.remove(test_db_name)
+        except:
+            pass
+
+
+@pytest.fixture
+def env_variables():
+    """Fixture to load and provide environment variables."""
+    env_vars = {
+        'NRP_ENABLED': os.getenv('NRP_ENABLED', 'False').lower() == 'true',
+        'PLANNER_ENABLED': os.getenv('PLANNER_ENABLED', 'False').lower() == 'true',
+        'PCE_EXTERNAL': os.getenv('PCE_EXTERNAL', 'False').lower() == 'true',
+        'DUMMY_MODE': os.getenv('DUMMY_MODE', 'True').lower() == 'true',
+        'DUMP_TEMPLATES': os.getenv('DUMP_TEMPLATES', 'False').lower() == 'true',
+        'TFS_L2VPN_SUPPORT': os.getenv('TFS_L2VPN_SUPPORT', 'False').lower() == 'true',
+        'WEBUI_DEPLOY': os.getenv('WEBUI_DEPLOY', 'True').lower() == 'true',
+        'UPLOAD_TYPE': os.getenv('UPLOAD_TYPE', 'WEBUI'),
+        'PLANNER_TYPE': os.getenv('PLANNER_TYPE', 'standard'),
+    }
+    return env_vars
+
+
+@pytest.fixture
+def controller_with_mocked_db(temp_db):
+    """Create an NSController with a mocked database."""
+    with patch('src.database.db.DB_NAME', temp_db):
+        yield NSController(controller_type="TFS")
+
+
+@pytest.fixture
+def ietf_intent():
+    """Valid intent in IETF format."""
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {
+                        "id": "qos1",
+                        "slo-policy": {
+                            "metric-bound": [
+                                {
+                                    "metric-type": "one-way-bandwidth",
+                                    "metric-unit": "kbps",
+                                    "bound": 1000
+                                }
+                            ]
+                        }
+                    }
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "slice-test-1",
+                    "sdps": {
+                        "sdp": [
+                            {
+                                "sdp-ip-address": "10.0.0.1",
+                                "node-id": "node1",
+                                "service-match-criteria": {
+                                    "match-criterion": [
+                                        {
+                                            "match-type": "vlan",
+                                            "value": "100"
+                                        }
+                                    ]
+                                },
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "sdp-peering": {
+                                                "peer-sap-id": "R1"
+                                            }
+                                        }
+                                    ]
+                                },
+                            },
+                            {
+                                "sdp-ip-address": "10.0.0.2",
+                                "node-id": "node2",
+                                "service-match-criteria": {
+                                    "match-criterion": [
+                                        {
+                                            "match-type": "vlan",
+                                            "value": "100"
+                                        }
+                                    ]
+                                },
+                                "attachment-circuits": {
+                                    "attachment-circuit": [
+                                        {
+                                            "sdp-peering": {
+                                                "peer-sap-id": "R2"
+                                            }
+                                        }
+                                    ]
+                                },
+                            },
+                        ]
+                    },
+                    "service-tags": {"tag-type": {"value": "L3VPN"}},
+                }
+            ],
+        }
+    }
+
+
+class TestBasicApiOperations:
+    """Tests for basic API operations."""
+    
+    def test_get_flows_empty(self, controller_with_mocked_db):
+        """Should return an error when there are no slices."""
+        result, code = Api(controller_with_mocked_db).get_flows()
+        assert code == 404
+        assert result["success"] is False
+        assert result["data"] is None
+    
+    def test_add_flow_success(self, controller_with_mocked_db, ietf_intent):
+        """Should be able to add a flow successfully."""
+        with patch('src.database.db.save_data') as mock_save:
+            result, code = Api(controller_with_mocked_db).add_flow(ietf_intent)
+            assert code == 201
+            assert result["success"] is True
+            assert "slices" in result["data"]
+    
+    def test_add_and_get_flow(self, controller_with_mocked_db, ietf_intent):
+        """Debe poder añadir un flow y luego recuperarlo."""
+        with patch('src.database.db.save_data') as mock_save, \
+             patch('src.database.db.get_all_data') as mock_get_all:
+            
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            
+            mock_get_all.return_value = [
+                {
+                    "slice_id": "slice-test-1",
+                    "intent": ietf_intent,
+                    "controller": "TFS"
+                }
+            ]
+            
+            flows, code = Api(controller_with_mocked_db).get_flows()
+            assert code == 200
+            assert any(s["slice_id"] == "slice-test-1" for s in flows)
+    
+    def test_modify_flow_success(self, controller_with_mocked_db, ietf_intent):
+        """Debe poder modificar un flow existente."""
+        with patch('src.database.db.update_data') as mock_update:
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            new_intent = ietf_intent.copy()
+            new_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = "qos2"
+            
+            result, code = Api(controller_with_mocked_db).modify_flow("slice-test-1", new_intent)
+            print(result)
+            assert code == 200
+            assert result["success"] is True
+    
+    def test_delete_specific_flow_success(self, controller_with_mocked_db, ietf_intent):
+        """Debe borrar un flow concreto."""
+        with patch('src.database.db.delete_data') as mock_delete:
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            result, code = Api(controller_with_mocked_db).delete_flows("slice-test-1")
+            assert code == 204
+            assert result == {}
+    
+    def test_delete_all_flows_success(self, controller_with_mocked_db):
+        """Debe borrar todos los flows."""
+        with patch('src.database.db.delete_all_data') as mock_delete_all:
+            result, code = Api(controller_with_mocked_db).delete_flows()
+            assert code == 204
+            assert result == {}
+    
+    def test_get_specific_flow(self, controller_with_mocked_db, ietf_intent):
+        """Debe poder recuperar un flow específico."""
+        with patch('src.database.db.get_data') as mock_get:
+            Api(controller_with_mocked_db).add_flow(ietf_intent)
+            mock_get.return_value = {
+                "slice_id": "slice-test-1",
+                "intent": ietf_intent,
+                "controller": "TFS"
+            }
+            
+            result, code = Api(controller_with_mocked_db).get_flows("slice-test-1")
+            assert code == 200
+            assert result["slice_id"] == "slice-test-1"
+
+
+class TestErrorHandling:
+    """Tests for error handling."""
+    
+    def test_add_flow_with_empty_intent(self, controller_with_mocked_db):
+        """Should fail when an empty intent is passed."""
+        result, code = Api(controller_with_mocked_db).add_flow({})
+        assert code in (400, 404, 500)
+        assert result["success"] is False
+    
+    def test_add_flow_with_none(self, controller_with_mocked_db):
+        """Should fail when None is passed as the intent."""
+        result, code = Api(controller_with_mocked_db).add_flow(None)
+        assert code in (400, 500)
+        assert result["success"] is False
+    
+    def test_get_nonexistent_slice(self, controller_with_mocked_db):
+        """Should return 404 when a nonexistent slice is requested."""
+        with patch('src.database.db.get_data') as mock_get:
+            # The db layer signals "not found" with ValueError (see db tests).
+            mock_get.side_effect = ValueError("No slice found")
+            
+            result, code = Api(controller_with_mocked_db).get_flows("slice-does-not-exist")
+            assert code == 404
+            assert result["success"] is False
+    
+    def test_modify_nonexistent_flow(self, controller_with_mocked_db, ietf_intent):
+        """Should fail when modifying a nonexistent flow."""
+        with patch('src.database.db.update_data') as mock_update:
+            mock_update.side_effect = ValueError("No slice found")
+            
+            result, code = Api(controller_with_mocked_db).modify_flow("nonexistent", ietf_intent)
+            assert code == 404
+            assert result["success"] is False
+    
+    def test_delete_nonexistent_flow(self, controller_with_mocked_db):
+        """Should fail when deleting a nonexistent flow."""
+        with patch('src.database.db.delete_data') as mock_delete:
+            mock_delete.side_effect = ValueError("No slice found")
+            
+            result, code = Api(controller_with_mocked_db).delete_flows("nonexistent")
+            assert code == 404
+            assert result["success"] is False
+
+
diff --git a/src/tests/test_database.py b/src/tests/test_database.py
new file mode 100644
index 0000000000000000000000000000000000000000..06034eb6be1ea730e75878d6aba337c515093d83
--- /dev/null
+++ b/src/tests/test_database.py
@@ -0,0 +1,585 @@
+import pytest
+import sqlite3
+import json
+import os
+import time
+from unittest.mock import patch, MagicMock
+from src.database.db import (
+    init_db,
+    save_data,
+    update_data,
+    delete_data,
+    get_data,
+    get_all_data,
+    delete_all_data,
+    DB_NAME
+)
+from src.database.store_data import store_data
+
+
+@pytest.fixture
+def test_db(tmp_path):
+    """Fixture to create and cleanup test database."""
+    test_db_name = str(tmp_path / "test_slice.db")
+    
+    # Use test database
+    with patch('src.database.db.DB_NAME', test_db_name):
+        conn = sqlite3.connect(test_db_name)
+        cursor = conn.cursor()
+        cursor.execute("""
+            CREATE TABLE IF NOT EXISTS slice (
+                slice_id TEXT PRIMARY KEY,
+                intent TEXT NOT NULL,
+                controller TEXT NOT NULL
+            )
+        """)
+        conn.commit()
+        conn.close()
+        
+        yield test_db_name
+        
+        # Cleanup - Close all connections and remove file
+        try:
+            # Force SQLite to release locks
+            sqlite3.connect(':memory:').execute('VACUUM').close()
+            
+            # Wait a moment for file locks to release
+            import time
+            time.sleep(0.1)
+            
+            # Remove the file if it exists
+            if os.path.exists(test_db_name):
+                os.remove(test_db_name)
+        except Exception as e:
+            # On Windows, sometimes files are locked. Try again after a delay
+            import time
+            time.sleep(0.5)
+            try:
+                if os.path.exists(test_db_name):
+                    os.remove(test_db_name)
+            except:
+                pass  # If it still fails, let pytest's tmp_path cleanup handle it
+
+
+@pytest.fixture
+def sample_intent():
+    """Fixture providing sample network slice intent.
+
+    Shape follows the IETF network-slice-service model: one slice-service
+    entry ("slice-service-12345") plus one slo-sle-template carrying a
+    bandwidth metric bound.
+    """
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slice-service": [{
+                "id": "slice-service-12345",
+                "description": "Test network slice",
+                "service-tags": {"tag-type": {"value": "L2VPN"}},
+                "sdps": {
+                    "sdp": [{
+                        "node-id": "node1",
+                        "sdp-ip-address": "10.0.0.1"
+                    }]
+                }
+            }],
+            "slo-sle-templates": {
+                "slo-sle-template": [{
+                    "id": "profile1",
+                    "slo-policy": {
+                        "metric-bound": [{
+                            "metric-type": "one-way-bandwidth",
+                            "metric-unit": "kbps",
+                            "bound": 1000
+                        }]
+                    }
+                }]
+            }
+        }
+    }
+
+
+@pytest.fixture
+def simple_intent():
+    """Fixture providing simple intent for basic testing."""
+    # Minimal flat payload — intentionally not IETF-shaped.
+    return {
+        "bandwidth": "1Gbps",
+        "latency": "10ms",
+        "provider": "opensec"
+    }
+
+
+class TestInitDb:
+    """Tests for database initialization."""
+    
+    def test_init_db_creates_table(self, tmp_path):
+        """Test that init_db creates the slice table."""
+        test_db = str(tmp_path / "test.db")
+        
+        with patch('src.database.db.DB_NAME', test_db):
+            init_db()
+            
+            # Inspect sqlite_master directly to confirm the table exists.
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='slice'")
+            result = cursor.fetchone()
+            conn.close()
+            time.sleep(0.05)  # Brief pause for file lock release
+            
+            assert result is not None
+            assert result[0] == 'slice'
+    
+    def test_init_db_creates_correct_columns(self, tmp_path):
+        """Test that init_db creates table with correct columns."""
+        test_db = str(tmp_path / "test.db")
+        
+        with patch('src.database.db.DB_NAME', test_db):
+            init_db()
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            # PRAGMA table_info rows: (cid, name, type, notnull, dflt, pk).
+            cursor.execute("PRAGMA table_info(slice)")
+            columns = cursor.fetchall()
+            conn.close()
+            time.sleep(0.05)
+            
+            column_names = [col[1] for col in columns]
+            assert "slice_id" in column_names
+            assert "intent" in column_names
+            assert "controller" in column_names
+    
+    def test_init_db_idempotent(self, tmp_path):
+        """Test that init_db can be called multiple times without error."""
+        test_db = str(tmp_path / "test.db")
+        
+        with patch('src.database.db.DB_NAME', test_db):
+            init_db()
+            init_db()  # Should not raise error
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='slice'")
+            result = cursor.fetchone()
+            conn.close()
+            time.sleep(0.05)
+            
+            assert result is not None
+
+
+class TestSaveData:
+    """Tests for save_data function."""
+    
+    def test_save_data_success(self, test_db, simple_intent):
+        """Test successful data saving."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            # Column order per the test_db schema:
+            # result[0]=slice_id, result[1]=intent JSON, result[2]=controller.
+            assert result is not None
+            assert result[0] == "slice-001"
+            assert result[2] == "TFS"
+            assert json.loads(result[1]) == simple_intent
+    
+    def test_save_data_with_complex_intent(self, test_db, sample_intent):
+        """Test saving complex nested intent structure."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT intent FROM slice WHERE slice_id = ?", (slice_id,))
+            result = cursor.fetchone()
+            conn.close()
+            
+            # The nested structure must survive a JSON round-trip intact.
+            retrieved_intent = json.loads(result[0])
+            assert retrieved_intent == sample_intent
+    
+    def test_save_data_duplicate_slice_id_raises_error(self, test_db, simple_intent):
+        """Test that saving duplicate slice_id raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            
+            with pytest.raises(ValueError, match="already exists"):
+                save_data("slice-001", simple_intent, "TFS")
+    
+    def test_save_data_multiple_slices(self, test_db, simple_intent):
+        """Test saving multiple different slices."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT COUNT(*) FROM slice")
+            count = cursor.fetchone()[0]
+            conn.close()
+            
+            assert count == 2
+    
+    def test_save_data_with_different_controllers(self, test_db, simple_intent):
+        """Test saving data with different controller types."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-tfs", simple_intent, "TFS")
+            save_data("slice-ixia", simple_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-tfs",))
+            tfs_result = cursor.fetchone()
+            cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-ixia",))
+            ixia_result = cursor.fetchone()
+            conn.close()
+            
+            assert tfs_result[0] == "TFS"
+            assert ixia_result[0] == "IXIA"
+
+
+class TestUpdateData:
+    """Tests for update_data function."""
+    
+    def test_update_data_success(self, test_db, simple_intent):
+        """Test successful data update."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            
+            updated_intent = {"bandwidth": "2Gbps", "latency": "5ms", "provider": "opensec"}
+            update_data("slice-001", updated_intent, "TFS")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT intent FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            retrieved_intent = json.loads(result[0])
+            assert retrieved_intent == updated_intent
+    
+    def test_update_data_nonexistent_slice_raises_error(self, test_db, simple_intent):
+        """Test that updating nonexistent slice raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            with pytest.raises(ValueError, match="No slice found"):
+                update_data("nonexistent-slice", simple_intent, "TFS")
+    
+    def test_update_data_controller_type(self, test_db, simple_intent):
+        """Test updating controller type."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            update_data("slice-001", simple_intent, "IXIA")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            assert result[0] == "IXIA"
+    
+    def test_update_data_complex_intent(self, test_db, sample_intent):
+        """Test updating with complex nested structure."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "TFS")
+            
+            updated_sample = sample_intent.copy()
+            updated_sample["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = "Updated description"
+            
+            update_data(slice_id, updated_sample, "IXIA")
+            
+            retrieved = get_data(slice_id)
+            assert retrieved["intent"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] == "Updated description"
+            assert retrieved["controller"] == "IXIA"
+
+
+class TestDeleteData:
+    """Tests for delete_data function."""
+    
+    def test_delete_data_success(self, test_db, simple_intent):
+        """Test successful data deletion."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            delete_data("slice-001")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-001",))
+            result = cursor.fetchone()
+            conn.close()
+            
+            assert result is None
+    
+    def test_delete_data_nonexistent_slice_raises_error(self, test_db):
+        """Test that deleting nonexistent slice raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            with pytest.raises(ValueError, match="No slice found"):
+                delete_data("nonexistent-slice")
+    
+    def test_delete_data_multiple_slices(self, test_db, simple_intent):
+        """Test deleting one slice doesn't affect others."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            
+            delete_data("slice-001")
+            
+            conn = sqlite3.connect(test_db)
+            cursor = conn.cursor()
+            cursor.execute("SELECT COUNT(*) FROM slice")
+            count = cursor.fetchone()[0]
+            cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-002",))
+            remaining = cursor.fetchone()
+            conn.close()
+            
+            assert count == 1
+            assert remaining[0] == "slice-002"
+
+
+class TestGetData:
+    """Tests for get_data function."""
+    
+    def test_get_data_success(self, test_db, simple_intent):
+        """Test retrieving existing data."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            result = get_data("slice-001")
+            
+            assert result["slice_id"] == "slice-001"
+            assert result["intent"] == simple_intent
+            assert result["controller"] == "TFS"
+    
+    def test_get_data_nonexistent_raises_error(self, test_db):
+        """Test that getting nonexistent slice raises ValueError."""
+        with patch('src.database.db.DB_NAME', test_db):
+            with pytest.raises(ValueError, match="No slice found"):
+                get_data("nonexistent-slice")
+    
+    def test_get_data_json_parsing(self, test_db, sample_intent):
+        """Test that returned intent is parsed JSON."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "TFS")
+            result = get_data(slice_id)
+            
+            # intent must come back as a dict, not the raw JSON text stored.
+            assert isinstance(result["intent"], dict)
+            assert result["intent"] == sample_intent
+    
+    def test_get_data_returns_all_fields(self, test_db, simple_intent):
+        """Test that get_data returns all fields."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            result = get_data("slice-001")
+            
+            # Exactly the three schema columns — no extras.
+            assert "slice_id" in result
+            assert "intent" in result
+            assert "controller" in result
+            assert len(result) == 3
+
+
+class TestGetAllData:
+    """Tests for get_all_data function."""
+    
+    def test_get_all_data_empty_database(self, test_db):
+        """Test retrieving all data from empty database."""
+        with patch('src.database.db.DB_NAME', test_db):
+            result = get_all_data()
+            # Empty table yields an empty list, not an error.
+            assert result == []
+    
+    def test_get_all_data_single_slice(self, test_db, simple_intent):
+        """Test retrieving all data with single slice."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            result = get_all_data()
+            
+            assert len(result) == 1
+            assert result[0]["slice_id"] == "slice-001"
+            assert result[0]["intent"] == simple_intent
+    
+    def test_get_all_data_multiple_slices(self, test_db, simple_intent):
+        """Test retrieving all data with multiple slices."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            save_data("slice-003", simple_intent, "TFS")
+            
+            result = get_all_data()
+            
+            # Membership checks only — row ordering is not asserted.
+            assert len(result) == 3
+            slice_ids = [slice_data["slice_id"] for slice_data in result]
+            assert "slice-001" in slice_ids
+            assert "slice-002" in slice_ids
+            assert "slice-003" in slice_ids
+    
+    def test_get_all_data_json_parsing(self, test_db, sample_intent):
+        """Test that all returned intents are parsed JSON."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            save_data(slice_id, sample_intent, "TFS")
+            save_data("slice-002", sample_intent, "IXIA")
+            
+            result = get_all_data()
+            
+            for slice_data in result:
+                assert isinstance(slice_data["intent"], dict)
+    
+    def test_get_all_data_includes_all_controllers(self, test_db, simple_intent):
+        """Test that get_all_data includes slices from different controllers."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-tfs", simple_intent, "TFS")
+            save_data("slice-ixia", simple_intent, "IXIA")
+            
+            result = get_all_data()
+            
+            controllers = [slice_data["controller"] for slice_data in result]
+            assert "TFS" in controllers
+            assert "IXIA" in controllers
+
+
+class TestDeleteAllData:
+    """Tests for delete_all_data function."""
+    
+    def test_delete_all_data_removes_all_slices(self, test_db, simple_intent):
+        """Test that delete_all_data removes all slices."""
+        with patch('src.database.db.DB_NAME', test_db):
+            save_data("slice-001", simple_intent, "TFS")
+            save_data("slice-002", simple_intent, "IXIA")
+            
+            delete_all_data()
+            
+            result = get_all_data()
+            assert result == []
+    
+    def test_delete_all_data_empty_database(self, test_db):
+        """Test delete_all_data on empty database doesn't raise error."""
+        with patch('src.database.db.DB_NAME', test_db):
+            delete_all_data()  # Should not raise error
+            result = get_all_data()
+            assert result == []
+
+
+class TestStoreData:
+    """Tests for store_data wrapper function."""
+    
+    def test_store_data_save_new_slice(self, test_db, sample_intent):
+        """Test store_data saves new slice without slice_id."""
+        with patch('src.database.db.DB_NAME', test_db):
+            store_data(sample_intent, None, "TFS")
+            
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            result = get_data(slice_id)
+            
+            assert result["slice_id"] == slice_id
+            assert result["intent"] == sample_intent
+            assert result["controller"] == "TFS"
+    
+    def test_store_data_update_existing_slice(self, test_db, sample_intent):
+        """Test store_data updates existing slice when slice_id provided."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            
+            # Save initial data
+            save_data(slice_id, sample_intent, "TFS")
+            
+            # Update with store_data
+            updated_intent = sample_intent.copy()
+            updated_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = "Updated"
+            store_data(updated_intent, slice_id, "IXIA")
+            
+            result = get_data(slice_id)
+            assert result["controller"] == "IXIA"
+            assert result["intent"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] == "Updated"
+    
+    def test_store_data_extracts_slice_id_from_intent(self, test_db, sample_intent):
+        """Test store_data correctly extracts slice_id from intent structure."""
+        with patch('src.database.db.DB_NAME', test_db):
+            store_data(sample_intent, None, "TFS")
+            
+            all_data = get_all_data()
+            assert len(all_data) == 1
+            assert all_data[0]["slice_id"] == "slice-service-12345"
+    
+    def test_store_data_with_different_controllers(self, test_db, sample_intent):
+        """Test store_data works with different controller types."""
+        with patch('src.database.db.DB_NAME', test_db):
+            store_data(sample_intent, None, "TFS")
+            
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            result = get_data(slice_id)
+            
+            assert result["controller"] == "TFS"
+
+
+class TestDatabaseIntegration:
+    """Integration tests for database operations."""
+    
+    def test_full_lifecycle_create_read_update_delete(self, test_db, simple_intent):
+        """Test complete slice lifecycle."""
+        with patch('src.database.db.DB_NAME', test_db):
+            # Create
+            save_data("slice-lifecycle", simple_intent, "TFS")
+            
+            # Read
+            result = get_data("slice-lifecycle")
+            assert result["slice_id"] == "slice-lifecycle"
+            
+            # Update
+            updated_intent = {"bandwidth": "5Gbps", "latency": "2ms", "provider": "opensec"}
+            update_data("slice-lifecycle", updated_intent, "IXIA")
+            
+            result = get_data("slice-lifecycle")
+            assert result["intent"] == updated_intent
+            assert result["controller"] == "IXIA"
+            
+            # Delete
+            delete_data("slice-lifecycle")
+            
+            # Reading after delete must signal "not found".
+            with pytest.raises(ValueError):
+                get_data("slice-lifecycle")
+    
+    def test_concurrent_operations(self, test_db, simple_intent):
+        """Test multiple concurrent database operations."""
+        # NOTE(review): despite the name, the operations below run strictly
+        # sequentially — no threads/processes are involved.
+        with patch('src.database.db.DB_NAME', test_db):
+            # Create multiple slices
+            for i in range(5):
+                save_data(f"slice-{i}", simple_intent, "TFS" if i % 2 == 0 else "IXIA")
+            
+            # Verify all created
+            all_data = get_all_data()
+            assert len(all_data) == 5
+            
+            # Update some
+            updated_intent = {"updated": True}
+            for i in range(0, 3):
+                update_data(f"slice-{i}", updated_intent, "TFS")
+            
+            # Verify updates
+            for i in range(0, 3):
+                result = get_data(f"slice-{i}")
+                assert result["intent"]["updated"] is True
+            
+            # Delete some
+            delete_data("slice-0")
+            delete_data("slice-2")
+            
+            all_data = get_all_data()
+            assert len(all_data) == 3
+    
+    def test_data_persistence_across_operations(self, test_db, sample_intent):
+        """Test that data persists correctly across multiple operations."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+            
+            # Save
+            save_data(slice_id, sample_intent, "TFS")
+            
+            # Get all and verify
+            all_before = get_all_data()
+            assert len(all_before) == 1
+            
+            # Save another
+            save_data("slice-other", sample_intent, "IXIA")
+            all_after = get_all_data()
+            assert len(all_after) == 2
+            
+            # Verify first slice still intact
+            first_slice = get_data(slice_id)
+            assert first_slice["intent"] == sample_intent
+            assert first_slice["controller"] == "TFS"
\ No newline at end of file
diff --git a/src/tests/test_e2e.py b/src/tests/test_e2e.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb91405568b415b4ef7b5148354fc825e3372a4
--- /dev/null
+++ b/src/tests/test_e2e.py
@@ -0,0 +1,101 @@
+import pytest
+import json
+from pathlib import Path
+from itertools import product
+from src.api.main import Api
+from src.main import NSController
+from app import create_app 
+
+# Directory containing the request JSON files
+REQUESTS_DIR = Path(__file__).parent / "requests"
+
+# All boolean configuration flags to exercise
+FLAGS_TO_TEST = ["WEBUI_DEPLOY", "DUMP_TEMPLATES", "PLANNER_ENABLED", "PCE_EXTERNAL", "NRP_ENABLED"]
+
+# Possible values for PLANNER_TYPE
+PLANNER_TYPE_VALUES = ["ENERGY", "HRAT", "TFS_OPTICAL"]
+
+
+@pytest.fixture
+def app(temp_sqlite_db):
+    """Create the Flask app with default configuration (temp DB active)."""
+    app = create_app()
+    return app
+
+@pytest.fixture
+def client(app):
+    """Flask test client for issuing HTTP requests."""
+    return app.test_client()
+
+@pytest.fixture
+def set_flags(app):
+    """Return a helper that writes the given flags directly into app.config."""
+    def _set(flags: dict):
+        for k, v in flags.items():
+            app.config[k] = v
+    return _set
+
+@pytest.fixture
+def temp_sqlite_db(monkeypatch, tmp_path):
+    """Point the database module at a temporary SQLite file for the test."""
+    temp_db_path = tmp_path / "test_slice.db"
+    monkeypatch.setattr("src.database.db.DB_NAME", str(temp_db_path))
+
+    # Initialise the schema inside the temporary database.
+    from src.database.db import init_db
+    init_db()
+
+    yield temp_db_path
+
+    # Teardown: remove the file (tmp_path cleanup is the fallback).
+    if temp_db_path.exists():
+        temp_db_path.unlink()
+
+# Helper that loads every request JSON file
+def load_request_files():
+    test_cases = []
+    for f in REQUESTS_DIR.glob("*.json"):
+        with open(f, "r") as file:
+            json_data = json.load(file)
+        test_cases.append(json_data)
+    return test_cases
+
+# Generator of every flag combination
+def generate_flag_combinations():
+    bool_values = [True, False]
+    for combo in product(bool_values, repeat=len(FLAGS_TO_TEST)):
+        bool_flags = dict(zip(FLAGS_TO_TEST, combo)) 
+        for planner_type in PLANNER_TYPE_VALUES:
+            yield {**bool_flags, "PLANNER_TYPE": planner_type}
+
+
+# Generator (not a fixture) pairing each request with each flag combination
+def generate_test_cases():
+    requests = load_request_files()
+    for json_data in requests:
+        for flags in generate_flag_combinations():
+            expected_codes = [200,201]
+            yield (json_data, flags, expected_codes)
+
+@pytest.mark.parametrize(
+    "json_data, flags, expected_codes",
+    list(generate_test_cases())
+)
+def test_add_and_delete_flow(app, json_data, flags, expected_codes, set_flags, temp_sqlite_db):
+    """Add a flow under every flag combination, then delete it if created."""
+    with app.app_context():
+        set_flags(flags)
+
+        controller = NSController(controller_type="TFS")
+        api = Api(controller)
+
+        # Add the flow
+        data, code = api.add_flow(json_data)
+        assert code in expected_codes, f"Flags en fallo: {flags}"
+
+        # Delete the flow only when creation reported a new slice_id
+        if code == 201 and isinstance(data, dict) and "slice_id" in data:
+            slice_id = data["slice_id"]
+            _, delete_code = api.delete_flows(slice_id=slice_id)
+            assert delete_code == 204, f"No se pudo eliminar el slice {slice_id}"
+
+
diff --git a/src/tests/test_initialization.py b/src/tests/test_initialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..c51cc0659a4d409781c1cdebf7b0512f158d38e6
--- /dev/null
+++ b/src/tests/test_initialization.py
@@ -0,0 +1,37 @@
+import pytest
+
+# Import the class under test (adjust the module name if it differs)
+from src.main import NSController  
+
+def test_init_default_values():
+    """Test that default initialization sets expected values."""
+    controller = NSController()
+
+    # Configurable attribute
+    assert controller.controller_type == "TFS"
+
+    # Internal attributes
+    assert controller.path == ""
+    assert controller.response == []
+    assert controller.start_time == 0
+    assert controller.end_time == 0
+    assert controller.setup_time == 0
+
+@pytest.mark.parametrize("controller_type", ["TFS", "IXIA", "custom"])
+def test_init_controller_type(controller_type):
+    """Test initialization with different controller types."""
+    controller = NSController(controller_type=controller_type)
+    assert controller.controller_type == controller_type
+
+def test_init_independence_between_instances():
+    """Test that each instance has independent state (mutable attrs)."""
+    c1 = NSController()
+    c2 = NSController()
+
+    # Mutate a list on one instance
+    c1.response.append("test-response")
+
+    # The other instance must not be affected
+    assert c2.response == []
+    assert c1.response == ["test-response"]
+
diff --git a/src/tests/test_mapper.py b/src/tests/test_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..219923f0bf70e6920398e945a14205dd913baab9
--- /dev/null
+++ b/src/tests/test_mapper.py
@@ -0,0 +1,639 @@
+import pytest
+import logging
+from unittest.mock import patch, MagicMock, call
+from flask import Flask
+from src.mapper.main import mapper
+from src.mapper.slo_viability import slo_viability
+
+
+@pytest.fixture
+def sample_ietf_intent():
+    """Fixture providing sample IETF network slice intent."""
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slice-service": [{
+                "id": "slice-service-12345",
+                "description": "Test network slice",
+                "service-tags": {"tag-type": {"value": "L2VPN"}}
+            }],
+            "slo-sle-templates": {
+                "slo-sle-template": [{
+                    "id": "profile1",
+                    "slo-policy": {
+                        "metric-bound": [
+                            {
+                                "metric-type": "one-way-bandwidth",
+                                "metric-unit": "kbps",
+                                "bound": 1000
+                            },
+                            {
+                                "metric-type": "one-way-delay-maximum",
+                                "metric-unit": "milliseconds",
+                                "bound": 10
+                            }
+                        ]
+                    }
+                }]
+            }
+        }
+    }
+
+
+@pytest.fixture
+def sample_nrp_view():
+    """Fixture providing sample NRP view."""
+    return [
+        {
+            "id": "nrp-1",
+            "available": True,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 1500
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 8
+                }
+            ]
+        },
+        {
+            "id": "nrp-2",
+            "available": True,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 500
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 15
+                }
+            ]
+        },
+        {
+            "id": "nrp-3",
+            "available": False,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 2000
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 5
+                }
+            ]
+        }
+    ]
+
+
+@pytest.fixture
+def mock_app():
+    """Fixture providing mock Flask app context."""
+    app = Flask(__name__)
+    app.config = {
+        "NRP_ENABLED": False,
+        "PLANNER_ENABLED": False,
+        "SERVER_NAME": "localhost",
+        "APPLICATION_ROOT": "/",
+        "PREFERRED_URL_SCHEME": "http"  
+    }
+    return app
+
+
+@pytest.fixture
+def app_context(mock_app):
+    """Fixture providing Flask application context."""
+    with mock_app.app_context():
+        yield mock_app
+
+
+class TestSloViability:
+    """Tests for slo_viability function."""
+    
+    def test_slo_viability_meets_all_requirements(self):
+        """Test when NRP meets all SLO requirements."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            },
+            {
+                "metric-type": "one-way-delay-maximum",
+                "bound": 10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 1500
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 8
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert score > 0
+    
+    def test_slo_viability_fails_bandwidth_minimum(self):
+        """Test when NRP doesn't meet minimum bandwidth requirement."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 500  # Less than required
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is False
+        assert score == 0
+    
+    def test_slo_viability_fails_delay_maximum(self):
+        """Test when NRP doesn't meet maximum delay requirement."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-delay-maximum",
+                "bound": 10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 15  # Greater than maximum allowed
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is False
+        assert score == 0
+    
+    def test_slo_viability_multiple_metrics_partial_failure(self):
+        """Test when one metric fails in a multi-metric comparison."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            },
+            {
+                "metric-type": "one-way-delay-maximum",
+                "bound": 10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 1500  # OK
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 15  # NOT OK
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is False
+        assert score == 0
+    
+    def test_slo_viability_flexibility_score_calculation(self):
+        """Test flexibility score calculation."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 2000  # 100% better than requirement
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        # Flexibility = (2000 - 1000) / 1000 = 1.0
+        assert score == 1.0
+    
+    def test_slo_viability_empty_slos(self):
+        """Test with empty SLO list."""
+        slice_slos = []
+        nrp_slos = {"slos": []}
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert score == 0
+    
+    def test_slo_viability_no_matching_metrics(self):
+        """Test when there are no matching metric types."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1000
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "two-way-bandwidth",
+                    "bound": 1500
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        # Should still return True as no metrics failed
+        assert viable is True
+        assert score == 0
+    
+    def test_slo_viability_packet_loss_maximum_type(self):
+        """Test packet loss as maximum constraint type."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-packet-loss",
+                "bound": 0.01  # 1% maximum acceptable
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-packet-loss",
+                    "bound": 0.005  # 0.5% NRP loss
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert score > 0
+
+
+class TestMapper:
+    """Tests for mapper function."""
+    
+    def test_mapper_with_nrp_disabled_and_planner_disabled(self, app_context, sample_ietf_intent):
+        """Test mapper when both NRP and Planner are disabled."""
+        app_context.config = {
+            "NRP_ENABLED": False,
+            "PLANNER_ENABLED": False
+        }
+        
+        result = mapper(sample_ietf_intent)
+        
+        assert result is None
+    
+    @patch('src.mapper.main.Planner')
+    def test_mapper_with_planner_enabled(self, mock_planner_class, app_context, sample_ietf_intent):
+        """Test mapper when Planner is enabled."""
+        app_context.config = {
+            "NRP_ENABLED": False,
+            "PLANNER_ENABLED": True,
+            "PLANNER_TYPE":"ENERGY"
+        }
+        
+        mock_planner_instance = MagicMock()
+        mock_planner_instance.planner.return_value = {"path": "node1->node2->node3"}
+        mock_planner_class.return_value = mock_planner_instance
+        
+        result = mapper(sample_ietf_intent)
+        
+        assert result == {"path": "node1->node2->node3"}
+        mock_planner_instance.planner.assert_called_once_with(sample_ietf_intent, "ENERGY")
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_enabled_finds_best_nrp(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper with NRP enabled finds the best NRP."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False,
+        }
+        
+        mock_realizer.return_value = sample_nrp_view
+        
+        result = mapper(sample_ietf_intent)
+        
+        # Verify realizer was called to READ NRP view
+        assert mock_realizer.call_args_list[0] == call(None, True, "READ")
+        assert result is None
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_enabled_no_viable_candidates(self, mock_realizer, app_context, sample_ietf_intent):
+        """Test mapper when no viable NRPs are found."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        # All NRPs are unavailable
+        nrp_view = [
+            {
+                "id": "nrp-1",
+                "available": False,
+                "slices": [],
+                "slos": [
+                    {
+                        "metric-type": "one-way-bandwidth",
+                        "bound": 500
+                    }
+                ]
+            }
+        ]
+        
+        mock_realizer.return_value = nrp_view
+        
+        result = mapper(sample_ietf_intent)
+        
+        assert result is None
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_enabled_creates_new_nrp(self, mock_realizer, app_context, sample_ietf_intent):
+        """Test mapper creates new NRP when no suitable candidate exists."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        # No viable NRPs
+        nrp_view = []
+        
+        mock_realizer.side_effect = [nrp_view, None]  # First call returns empty, second for CREATE
+        
+        result = mapper(sample_ietf_intent)
+        
+        # Verify CREATE was called
+        create_call = [c for c in mock_realizer.call_args_list if len(c[0]) > 2 and c[0][2] == "CREATE"]
+        assert len(create_call) > 0
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_with_nrp_and_planner_both_enabled(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper when both NRP and Planner are enabled."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": True,
+            "PLANNER_TYPE":"ENERGY"
+        }
+        
+        mock_realizer.return_value = sample_nrp_view
+        
+        with patch('src.mapper.main.Planner') as mock_planner_class:
+            mock_planner_instance = MagicMock()
+            mock_planner_instance.planner.return_value = {"path": "optimized_path"}
+            mock_planner_class.return_value = mock_planner_instance
+            
+            result = mapper(sample_ietf_intent)
+            
+            # Planner should be called and return the result
+            assert result == {"path": "optimized_path"}
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_updates_best_nrp_with_slice(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper updates best NRP with new slice."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        mock_realizer.return_value = sample_nrp_view
+        
+        result = mapper(sample_ietf_intent)
+        
+        # Verify UPDATE was called
+        update_calls = [c for c in mock_realizer.call_args_list if len(c[0]) > 2 and c[0][2] == "UPDATE"]
+        assert len(update_calls) > 0
+    
+    @patch('src.mapper.main.realizer')
+    def test_mapper_extracts_slos_correctly(self, mock_realizer, app_context, sample_ietf_intent):
+        """Test that mapper correctly extracts SLOs from intent."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        mock_realizer.return_value = []
+        
+        mapper(sample_ietf_intent)
+        
+        # Verify the function processed the intent
+        assert mock_realizer.called
+    
+    @patch('src.mapper.main.logging')
+    def test_mapper_logs_debug_info(self, mock_logging, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test mapper logs debug information."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        with patch('src.mapper.main.realizer') as mock_realizer:
+            mock_realizer.return_value = sample_nrp_view
+            
+            mapper(sample_ietf_intent)
+            
+            # Verify debug logging was called
+            assert mock_logging.debug.called
+
+
+class TestMapperIntegration:
+    """Integration tests for mapper functionality."""
+    
+    def test_mapper_complete_nrp_workflow(self, app_context, sample_ietf_intent, sample_nrp_view):
+        """Test complete NRP mapping workflow."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        with patch('src.mapper.main.realizer') as mock_realizer:
+            mock_realizer.return_value = sample_nrp_view
+            
+            result = mapper(sample_ietf_intent)
+            
+            # Verify the workflow sequence
+            assert mock_realizer.call_count >= 1
+            first_call = mock_realizer.call_args_list[0]
+            assert first_call[0][1] is True  # need_nrp parameter
+            assert first_call[0][2] == "READ"  # READ operation
+    
+    def test_mapper_complete_planner_workflow(self, app_context, sample_ietf_intent):
+        """Test complete Planner workflow."""
+        app_context.config = {
+            "NRP_ENABLED": False,
+            "PLANNER_ENABLED": True,
+            "PLANNER_TYPE":"ENERGY"
+        }
+        
+        expected_path = {
+            "path": "node1->node2->node3",
+            "cost": 10,
+            "latency": 5
+        }
+        
+        with patch('src.mapper.main.Planner') as mock_planner_class:
+            mock_planner_instance = MagicMock()
+            mock_planner_instance.planner.return_value = expected_path
+            mock_planner_class.return_value = mock_planner_instance
+            
+            result = mapper(sample_ietf_intent)
+            
+            assert result == expected_path
+            mock_planner_instance.planner.assert_called_once()
+    
+    def test_mapper_with_invalid_nrp_response(self, app_context, sample_ietf_intent):
+        """Test mapper behavior with invalid NRP response."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        # Invalid NRP without expected fields
+        invalid_nrp = {
+            "id": "nrp-invalid"
+            # Missing 'available' and 'slos' fields
+        }
+        
+        with patch('src.mapper.main.realizer') as mock_realizer:
+            mock_realizer.return_value = [invalid_nrp]
+            
+            # Should handle gracefully
+            try:
+                result = mapper(sample_ietf_intent)
+            except (KeyError, TypeError):
+                # Expected to fail gracefully
+                pass
+    
+    def test_mapper_with_missing_slos_in_intent(self, app_context):
+        """Test mapper behavior when intent has no SLOs."""
+        app_context.config = {
+            "NRP_ENABLED": True,
+            "PLANNER_ENABLED": False
+        }
+        
+        invalid_intent = {
+            "ietf-network-slice-service:network-slice-services": {
+                "slice-service": [{
+                    "id": "slice-1"
+                }],
+                "slo-sle-templates": {
+                    "slo-sle-template": [{
+                        "id": "profile1",
+                        "slo-policy": {
+                            # No metric-bound key
+                        }
+                    }]
+                }
+            }
+        }
+        
+        try:
+            mapper(invalid_intent)
+        except (KeyError, TypeError):
+            # Expected behavior
+            pass
+
+
+class TestSloViabilityEdgeCases:
+    """Edge case tests for slo_viability function."""
+    
+    def test_slo_viability_with_zero_bound(self):
+        """Test handling of zero bounds in SLO."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 0
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 100
+                }
+            ]
+        }
+        
+        # Should handle zero division gracefully or fail as expected
+        try:
+            viable, score = slo_viability(slice_slos, nrp_slos)
+        except (ZeroDivisionError, ValueError):
+            pass
+    
+    def test_slo_viability_with_very_large_bounds(self):
+        """Test handling of very large SLO bounds."""
+        slice_slos = [
+            {
+                "metric-type": "one-way-bandwidth",
+                "bound": 1e10
+            }
+        ]
+        
+        nrp_slos = {
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 2e10
+                }
+            ]
+        }
+        
+        viable, score = slo_viability(slice_slos, nrp_slos)
+        
+        assert viable is True
+        assert isinstance(score, (int, float))
+    
+    def test_slo_viability_all_delay_types(self):
+        """Test handling of all delay metric t ypes."""
+        delay_types = [
+            "one-way-delay-maximum",
+            "two-way-delay-maximum",
+            "one-way-delay-percentile",
+            "two-way-delay-percentile",
+            "one-way-delay-variation-maximum",
+            "two-way-delay-variation-maximum"
+        ]
+        
+        for delay_type in delay_types:
+            slice_slos = [{"metric-type": delay_type, "bound": 10}]
+            nrp_slos = {"slos": [{"metric-type": delay_type, "bound": 8}]}
+            
+            viable, score = slo_viability(slice_slos, nrp_slos)
+            
+            assert viable is True
+            assert score >= 0
\ No newline at end of file
diff --git a/src/tests/test_nbi_processor.py b/src/tests/test_nbi_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef1349425fba9d6756a4b9af122ba58b4a5ff41b
--- /dev/null
+++ b/src/tests/test_nbi_processor.py
@@ -0,0 +1,222 @@
+import pytest
+from unittest.mock import patch
+from src.nbi_processor.detect_format import detect_format
+from src.nbi_processor.main import nbi_processor
+from src.nbi_processor.translator import translator
+
+
+# ---------- Tests detect_format ----------
+
+def test_detect_format_ietf():
+    data = {"ietf-network-slice-service:network-slice-services": {}}
+    assert detect_format(data) == "IETF"
+
+def test_detect_format_3gpp_variants():
+    assert detect_format({"RANSliceSubnet1": {}}) == "3GPP"
+    assert detect_format({"NetworkSlice1": {}}) == "3GPP"
+    assert detect_format({"TopSliceSubnet1": {}}) == "3GPP"
+    assert detect_format({"CNSliceSubnet1": {}}) == "3GPP"
+
+def test_detect_format_none():
+    assert detect_format({"foo": "bar"}) is None
+
+
+# ---------- Fixtures ----------
+
+@pytest.fixture
+def ietf_intent():
+    return {"ietf-network-slice-service:network-slice-services": {"foo": "bar"}}
+
+@pytest.fixture
+def gpp_intent():
+    # Minimal structure consistent with the translator
+    return {
+        "RANSliceSubnet1": {
+            "networkSliceSubnetRef": ["subnetA", "subnetB"]
+        },
+        "subnetA": {
+            "EpTransport": ["EpTransport ep1", "EpTransport ep2"],
+            "SliceProfileList": [{
+                "RANSliceSubnetProfile": {
+                    "dLThptPerSliceSubnet": {
+                        "GuaThpt": 1,
+                        "MaxThpt": 2
+                    },
+                    "uLThptPerSliceSubnet": {
+                        "GuaThpt": 1,
+                        "MaxThpt": 2
+                    },
+                    "dLLatency": 20,
+                    "uLLatency": 20
+                }
+            }],
+        },
+        "subnetB": {
+            "EpTransport": ["EpTransport ep3", "EpTransport ep4"],
+        },
+        "EpTransport ep1": {
+            "qosProfile": "qosA",
+            "EpApplicationRef": ["EP_N2 epRef1"],
+            "logicalInterfaceInfo": {"logicalInterfaceType": "typeA", "logicalInterfaceId": "idA"},
+            "IpAddress": "1.1.1.1",
+            "NextHopInfo": "NH1",
+        },
+        "EpTransport ep2": {
+            "qosProfile": "qosB",
+            "EpApplicationRef": ["EP_N2 epRef2"],
+            "logicalInterfaceInfo": {"logicalInterfaceType": "typeB", "logicalInterfaceId": "idB"},
+            "IpAddress": "2.2.2.2",
+            "NextHopInfo": "NH2",
+        },
+        "EP_N2 epRef1": {"localAddress": "10.0.0.1", "remoteAddress": "11.1.1.1", "epTransportRef": "ep1"},
+        "EP_N2 epRef2": {"localAddress": "10.0.0.2", "remoteAddress": "11.1.1.2", "epTransportRef": "ep2"},
+        "EpTransport ep3": {"qosProfile": "qosC", "EpApplicationRef": ["EP_N2 epRef3"], "logicalInterfaceInfo": {"logicalInterfaceType": "typeC", "logicalInterfaceId": "idC"}, "IpAddress": "3.3.3.3", "NextHopInfo": "NH3"},
+        "EpTransport ep4": {"qosProfile": "qosD", "EpApplicationRef": ["EP_N2 epRef4"], "logicalInterfaceInfo": {"logicalInterfaceType": "typeD", "logicalInterfaceId": "idD"}, "IpAddress": "4.4.4.4", "NextHopInfo": "NH4"},
+        "EP_N2 epRef3": {"localAddress": "10.0.0.3", "remoteAddress": "11.1.1.3", "epTransportRef": "ep3"},
+        "EP_N2 epRef4": {"localAddress": "10.0.0.4", "remoteAddress": "11.1.1.4", "epTransportRef": "ep4"},
+    }
+
+
+@pytest.fixture
+def fake_template():
+    # Minimal template so the translator can operate
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {"id": "", "slo-policy": {"metric-bound": []}}
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "",
+                    "description": "",
+                    "slo-sle-policy": {},
+                    "sdps": {"sdp": [
+                        {"service-match-criteria": {"match-criterion": [{}]}, "attachment-circuits": {"attachment-circuit": [{"sdp-peering": {}}]}},
+                        {"service-match-criteria": {"match-criterion": [{}]}, "attachment-circuits": {"attachment-circuit": [{"sdp-peering": {}}]}}
+                    ]},
+                    "connection-groups": {"connection-group": [{}]},
+                }
+            ],
+        }
+    }
+
+
+# ---------- Tests nbi_processor ----------
+
+def test_nbi_processor_ietf(ietf_intent):
+    result = nbi_processor(ietf_intent)
+    assert isinstance(result, list)
+    assert result[0] == ietf_intent
+
+@patch("src.nbi_processor.main.translator")
+def test_nbi_processor_3gpp(mock_translator, gpp_intent):
+    mock_translator.return_value = {"ietf-network-slice-service:network-slice-services": {}}
+    result = nbi_processor(gpp_intent)
+    assert isinstance(result, list)
+    assert len(result) == 2  # Two subnets processed
+    assert all("ietf-network-slice-service:network-slice-services" in r for r in result)
+
+def test_nbi_processor_unrecognized():
+    with pytest.raises(ValueError):
+        nbi_processor({"foo": "bar"})
+
+def test_nbi_processor_empty():
+    with pytest.raises(ValueError):
+        nbi_processor({})
+
+
+# ---------- Tests translator ----------
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_basic(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    result = translator(gpp_intent, "subnetA")
+
+    assert isinstance(result, dict)
+    assert "ietf-network-slice-service:network-slice-services" in result
+
+    slice_service = result["ietf-network-slice-service:network-slice-services"]["slice-service"][0]
+    assert slice_service["id"].startswith("slice-service-")
+    assert "description" in slice_service
+    assert slice_service["slo-sle-policy"]["slo-sle-template"] == "qosA"  # viene del ep1
+
+import re
+import uuid
+
+
+# ---------- Extra detect_format ----------
+
+@pytest.mark.parametrize("data", [
+    None,
+    [],
+    "",
+    123,
+])
+def test_detect_format_invalid_types(data):
+    assert detect_format(data if isinstance(data, dict) else {}) in (None, "IETF", "3GPP")
+
+
+def test_detect_format_multiple_keys():
+    # When both IETF and 3GPP keys are present, IETF must take priority
+    data = {
+        "ietf-network-slice-service:network-slice-services": {},
+        "RANSliceSubnet1": {}
+    }
+    assert detect_format(data) == "IETF"
+
+
+# ---------- Extra nbi_processor ----------
+
+def test_nbi_processor_gpp_missing_refs(gpp_intent):
+    # Removing networkSliceSubnetRef should make the translator loop raise KeyError
+    broken = gpp_intent.copy()
+    broken["RANSliceSubnet1"] = {}  # missing "networkSliceSubnetRef"
+    with pytest.raises(KeyError):
+        nbi_processor(broken)
+
+
+# ---------- Extra translator ----------
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_maps_metrics(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    result = translator(gpp_intent, "subnetA")
+
+    metrics = result["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
+    metric_types = {m["metric-type"] for m in metrics}
+    assert "one-way-delay-maximum" in metric_types
+    assert "one-way-bandwidth" in metric_types
+
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_empty_profile(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    gpp_intent["subnetA"]["SliceProfileList"] = [{}]  # vacío
+    result = translator(gpp_intent, "subnetA")
+    metrics = result["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
+    assert metrics == []  # should add nothing
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_sdps_are_populated(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    result = translator(gpp_intent, "subnetA")
+    slice_service = result["ietf-network-slice-service:network-slice-services"]["slice-service"][0]
+
+    sdp0 = slice_service["sdps"]["sdp"][0]
+    assert sdp0["node-id"] == "ep1"
+    assert re.match(r"^\d+\.\d+\.\d+\.\d+$", sdp0["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"])
+    assert "target-connection-group-id" in sdp0["service-match-criteria"]["match-criterion"][0]
+
+    sdp1 = slice_service["sdps"]["sdp"][1]
+    assert sdp1["node-id"] == "ep2"
+    assert sdp1["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"].startswith("NH")
+
+
+@patch("src.nbi_processor.translator.load_template")
+def test_translator_with_single_endpoint_should_fail(mock_load_template, gpp_intent, fake_template):
+    mock_load_template.return_value = fake_template
+    gpp_intent["subnetA"]["EpTransport"] = ["EpTransport ep1"]  # solo uno
+    with pytest.raises(IndexError):
+        translator(gpp_intent, "subnetA")
diff --git a/src/tests/test_utils.py b/src/tests/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b887a497180777fb9fdd86ca5f04d57c5b4c77a
--- /dev/null
+++ b/src/tests/test_utils.py
@@ -0,0 +1,182 @@
+import json
+import pytest
+import os
+
+from src.utils.load_template import load_template
+from src.utils.dump_templates import dump_templates
+from src.utils.send_response import send_response
+from src.utils.build_response import build_response
+from flask import Flask
+
@pytest.fixture
def tmp_json_file(tmp_path):
    """Create a valid temporary JSON file and return its path and content."""
    data = {"name": "test"}
    file_path = tmp_path / "template.json"
    file_path.write_text(json.dumps(data))
    return file_path, data
+
+
def test_load_template_ok(tmp_json_file):
    """Should load a valid JSON file correctly."""
    file_path, expected = tmp_json_file
    result = load_template(str(file_path))
    assert result == expected
+
+
def test_load_template_invalid(tmp_path):
    """Should return an error response when the JSON is invalid."""
    bad_file = tmp_path / "bad.json"
    bad_file.write_text("{invalid json}")

    # On failure load_template returns a (response_dict, status_code) tuple.
    result, code = load_template(str(bad_file))
    assert code == 500
    assert result["success"] is False
    assert "Template loading error" in result["error"]
+
def test_dump_templates_enabled(monkeypatch, tmp_path):
    """Should dump all JSON templates into src/templates when DUMP_TEMPLATES is enabled."""
    templates_dir = tmp_path / "src" / "templates"
    templates_dir.mkdir(parents=True)

    # Redirect the module-level output directory to the temporary path.
    monkeypatch.setattr("src.utils.dump_templates.TEMPLATES_PATH", str(templates_dir))

    app = Flask(__name__)
    app.config["DUMP_TEMPLATES"] = True

    with app.app_context():
        nbi = {"nbi": 1}
        ietf = {"ietf": 2}
        realizer = {"realizer": 3}

        dump_templates(nbi, ietf, realizer)

    # Each template must exist on disk with its exact payload.
    for name, data in [("nbi_template.json", nbi), ("ietf_template.json", ietf), ("realizer_template.json", realizer)]:
        file_path = templates_dir / name
        assert file_path.exists()
        assert json.loads(file_path.read_text()) == data
+
def test_dump_templates_disabled(monkeypatch, tmp_path):
    """Should write nothing into src/templates when DUMP_TEMPLATES is disabled."""
    templates_dir = tmp_path / "src" / "templates"
    templates_dir.mkdir(parents=True)

    # Redirect the module-level output directory to the temporary path.
    monkeypatch.setattr("src.utils.dump_templates.TEMPLATES_PATH", str(templates_dir))

    app = Flask(__name__)
    app.config["DUMP_TEMPLATES"] = False

    with app.app_context():
        dump_templates({"nbi": 1}, {"ietf": 2}, {"realizer": 3})

    for name in ["nbi_template.json", "ietf_template.json", "realizer_template.json"]:
        assert not (templates_dir / name).exists()
+
def test_send_response_success():
    """Should return success=True and code=200 when the result is True."""
    resp, code = send_response(True, data={"k": "v"})
    assert code == 200
    assert resp["success"] is True
    assert resp["data"]["k"] == "v"
    assert resp["error"] is None
+
+
def test_send_response_error():
    """Should return success=False and code=400 when the result is False."""
    resp, code = send_response(False, message="fallo")
    assert code == 400
    assert resp["success"] is False
    assert resp["data"] is None
    assert "fallo" in resp["error"]
+
def ietf_intent():
    """Return a minimal, valid intent in the simplified IETF format.

    The intent contains one SLO/SLE template (bandwidth bound, availability
    and MTU) and one point-to-point slice service with two VLAN-matched SDPs.
    """
    bandwidth_bound = {
        "metric-type": "one-way-bandwidth",
        "metric-unit": "kbps",
        "bound": 1000,
    }
    qos_template = {
        "id": "qos1",
        "slo-policy": {
            "metric-bound": [bandwidth_bound],
            "availability": 99.9,
            "mtu": 1500,
        },
    }

    def make_sdp(sdp_id, ip):
        # Every SDP carries a single VLAN-based match criterion.
        return {
            "id": sdp_id,
            "sdp-ip-address": ip,
            "service-match-criteria": {
                "match-criterion": [{"match-type": "vlan", "value": "100"}]
            },
        }

    slice_service = {
        "id": "slice-test-1",
        "sdps": {"sdp": [make_sdp("CU", "10.0.0.1"), make_sdp("DU", "10.0.0.2")]},
    }

    return {
        "ietf-network-slice-service:network-slice-services": {
            "slo-sle-templates": {"slo-sle-template": [qos_template]},
            "slice-service": [slice_service],
        }
    }
+
+
def test_build_response_ok():
    """Should build the response correctly from a valid IETF intent."""
    intent = ietf_intent()
    response = []
    result = build_response(intent, response)

    assert isinstance(result, list)
    assert len(result) == 1

    slice_data = result[0]
    assert slice_data["id"] == "slice-test-1"
    assert slice_data["source"] == "CU"
    assert slice_data["destination"] == "DU"
    assert slice_data["vlan"] == "100"

    # Validate the extracted QoS constraints.
    requirements = slice_data["requirements"]
    assert any(r["constraint_type"] == "one-way-bandwidth[kbps]" and r["constraint_value"] == "1000" for r in requirements)
    assert any(r["constraint_type"] == "availability[%]" and r["constraint_value"] == "99.9" for r in requirements)
    assert any(r["constraint_type"] == "mtu[bytes]" and r["constraint_value"] == "1500" for r in requirements)
+
+
def test_build_response_empty_policy():
    """Should return a list with no constraints when slo-policy is empty."""
    intent = ietf_intent()
    intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"] = {}
    response = []
    result = build_response(intent, response)

    assert isinstance(result, list)
    assert len(result[0]["requirements"]) == 0
+
+
def test_build_response_invalid_intent():
    """Should fail cleanly when the intent lacks the expected structure."""
    bad_intent = {}
    response = []
    # build_response raises on malformed intents; treat that as "no result".
    try:
        result = build_response(bad_intent, response)
    except Exception:
        result = []
    assert result == []
diff --git a/src/utils/build_response.py b/src/utils/build_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7ddba0be77fa9fd103e9bc1a12b82fd7b62cd88
--- /dev/null
+++ b/src/utils/build_response.py
@@ -0,0 +1,90 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from .safe_get import safe_get
+
def build_response(intent, response, controller_type = None):
    """
    Build a structured response from a network slice intent.

    Extracts key information from an IETF network slice intent and appends it
    to `response` as a standardized entry with slice details and QoS
    requirements.

    Args:
        intent (dict): IETF network slice service intent containing:
            - slice-service: Service configuration with SDPs and IDs
            - slo-sle-templates: QoS policy templates
        response (list): Existing response list to append to
        controller_type (str, optional): Type of controller managing the slice.
                                        Defaults to None (currently unused).

    Returns:
        list: Updated response list with appended slice information containing:
            - id: Slice service identifier
            - source: Source service delivery point ID
            - destination: Destination service delivery point ID
            - vlan: VLAN identifier from match criteria
            - requirements: List of QoS constraint dictionaries with:
                * constraint_type: Metric type and unit (e.g., "latency[ms]")
                * constraint_value: Bound value as string

    Raises:
        AttributeError: If the intent has no slo-policy (safe_get yields None);
            callers rely on an exception to detect malformed intents.

    Notes:
        - Extracts metric bounds from the first SLO/SLE template's policy
        - Includes availability and MTU if specified in the SLO policy
        - Assumes point-to-point topology with exactly 2 SDPs
        - VLAN extracted from first SDP's first match criterion
    """
    services_key = "ietf-network-slice-service:network-slice-services"

    # `slice_id` instead of `id` to avoid shadowing the builtin.
    slice_id = safe_get(intent, [services_key, "slice-service", 0, "id"])
    source = safe_get(intent, [services_key, "slice-service", 0, "sdps", "sdp", 0, "id"])
    destination = safe_get(intent, [services_key, "slice-service", 0, "sdps", "sdp", 1, "id"])
    vlan = safe_get(intent, [services_key, "slice-service", 0, "sdps", "sdp", 0, "service-match-criteria", "match-criterion", 0, "value"])

    qos_requirements = []

    # QoS requirements come from the first SLO/SLE template's policy.
    slo_policy = safe_get(intent, [services_key, "slo-sle-templates", "slo-sle-template", 0, "slo-policy"])

    # Metric bounds (bandwidth, delay, jitter, ...).
    for metric in slo_policy.get("metric-bound", []):
        qos_requirements.append({
            "constraint_type": f"{metric['metric-type']}[{metric['metric-unit']}]",
            "constraint_value": str(metric["bound"]),
        })

    # Availability, if specified.
    if "availability" in slo_policy:
        qos_requirements.append({
            "constraint_type": "availability[%]",
            "constraint_value": str(slo_policy["availability"]),
        })

    # MTU, if specified.
    if "mtu" in slo_policy:
        qos_requirements.append({
            "constraint_type": "mtu[bytes]",
            "constraint_value": str(slo_policy["mtu"]),
        })

    response.append({
        "id": slice_id,
        "source": source,
        "destination": destination,
        "vlan": vlan,
        "requirements": qos_requirements,
    })
    return response
\ No newline at end of file
diff --git a/src/utils/dump_templates.py b/src/utils/dump_templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3fbb444792bef2ed3e6eaa0002a569619951cba
--- /dev/null
+++ b/src/utils/dump_templates.py
@@ -0,0 +1,64 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import json, os
+from src.config.constants import TEMPLATES_PATH
+from flask import current_app
+
def dump_templates(nbi_file, ietf_file, realizer_file):
    """
    Persist the three network slice templates as JSON files for inspection.

    Writes the templates produced at each processing stage (NBI request,
    IETF intent, realizer template) under TEMPLATES_PATH so they can be
    examined while debugging or documenting slice processing.

    Args:
        nbi_file (dict): Northbound Interface template - original user/API request
        ietf_file (dict): IETF-standardized network slice intent format
        realizer_file (dict): Controller-specific realization template

    Returns:
        None

    Notes:
        - No-op unless the Flask app config flag DUMP_TEMPLATES is truthy
        - Output files under TEMPLATES_PATH:
          * nbi_template.json - Original NBI request
          * ietf_template.json - Standardized IETF format
          * realizer_template.json - Controller-specific format
        - JSON is written with 2-space indentation for readability

    Raises:
        IOError: If unable to write to the TEMPLATES_PATH directory
    """
    if not current_app.config["DUMP_TEMPLATES"]:
        return

    # (filename, payload) pairs to write out.
    outputs = (
        ("nbi_template.json", nbi_file),
        ("ietf_template.json", ietf_file),
        ("realizer_template.json", realizer_file),
    )

    for name, payload in outputs:
        with open(os.path.join(TEMPLATES_PATH, name), "w") as handle:
            json.dump(payload, handle, indent=2)
\ No newline at end of file
diff --git a/src/utils/load_template.py b/src/utils/load_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd2cd8bb44cccce0f0a216665f5a8251a3ba504c
--- /dev/null
+++ b/src/utils/load_template.py
@@ -0,0 +1,42 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, json
+from .send_response import send_response
+
def load_template(dir_t):
    """
    Load and parse a JSON template file.

    The raw text is normalised before parsing: tabs and newlines are removed
    and single quotes are rewritten as double quotes so that loosely
    formatted (single-quoted) templates still parse as JSON.

    Args:
        dir_t (str): Path to the template file

    Returns:
        dict: Parsed JSON template on success.
        tuple: (error_response_dict, 500) on failure — note the inconsistent
            return type; callers must check for the tuple form.

    Notes:
        - NOTE(review): the blanket `.replace("'", '"')` also rewrites
          apostrophes inside string values and would corrupt such templates —
          confirm templates never contain literal single quotes in values.
    """
    try:
        with open(dir_t, "r") as source:
            template = json.loads(
                source.read()
                .replace("\t", "")
                .replace("\n", "")
                .replace("'", '"')
                .strip()
            )
        return template
    except Exception as e:
        logging.error(f"Template loading error: {e}")
        return send_response(False, code=500, message=f"Template loading error: {e}")
\ No newline at end of file
diff --git a/src/utils/safe_get.py b/src/utils/safe_get.py
new file mode 100644
index 0000000000000000000000000000000000000000..c02b1bfb5d20a3be562d2e2d9faea3f6a4308dae
--- /dev/null
+++ b/src/utils/safe_get.py
@@ -0,0 +1,33 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
def safe_get(dct, keys):
    """
    Safely retrieve a nested value from a structure of dicts and lists.

    Args:
        dct (dict or list): The root container to traverse.
        keys (list): Keys (for dicts) and integer indices (for lists) to follow.

    Returns:
        The value at the nested location, or None if any key/index is not found.

    Notes:
        - List indices follow Python semantics, so valid negative indices
          (e.g. -1 for the last element) are supported.
        - Fix: indices are now bounds-checked on both ends, so an
          out-of-range negative index (e.g. -10 on a 2-item list) returns
          None instead of raising IndexError.
    """
    current = dct
    for key in keys:
        if isinstance(current, dict) and key in current:
            current = current[key]
        elif isinstance(current, list) and isinstance(key, int) and -len(current) <= key < len(current):
            current = current[key]
        else:
            return None
    return current
diff --git a/src/utils/send_response.py b/src/utils/send_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..30c4433fa4eb8933962838584b81c43a2968bda6
--- /dev/null
+++ b/src/utils/send_response.py
@@ -0,0 +1,54 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+import logging, inspect
+
def send_response(result, message=None, code=None, data=None):
    """
    Generate a standardized API response tuple.

    Args:
        result (bool): True for success, False for failure.
        message (str, optional): Message (success or error). Defaults to None.
        code (int, optional): HTTP status code; defaults to 200 on success
            and 400 on failure when not given.
        data (dict, optional): Additional payload for successful responses.
            Defaults to None (an empty dict is returned).

    Returns:
        tuple: (response_dict, http_status_code)
    """
    # Identify the caller so error messages point at the failing call site.
    frame = inspect.currentframe().f_back
    filename = frame.f_code.co_filename
    lineno = frame.f_lineno

    if result:
        code = code or 200
        response = {
            "success": True,
            "data": data or {},
            "error": None,
        }
    else:
        code = code or 400
        # Fix: embed the caller's filename — it was computed but never used,
        # and the message hard-coded a placeholder instead.
        error_info = f"{message or 'An error occurred while processing the request.'} (File: {filename}, Line: {lineno})"
        logging.warning(f"Request failed. Reason: {message}")
        response = {
            "success": False,
            "data": None,
            "error": error_info,
        }

    return response, code
\ No newline at end of file
diff --git a/src/webui/gui.py b/src/webui/gui.py
index 1afb831c6410b8a58d51c1b077a86c4a665db33d..49d8712e9fcfb2f368efd9d302a2e70655811015 100644
--- a/src/webui/gui.py
+++ b/src/webui/gui.py
@@ -18,12 +18,11 @@ import json, logging, uuid
 import requests
 import os
 import pandas as pd
-from flask import Flask, render_template, request, jsonify, redirect, url_for, session, Blueprint
-from collections import OrderedDict
-from src.Constants import SRC_PATH, NSC_PORT, TEMPLATES_PATH, DUMMY_MODE
-from src.realizers.ixia.NEII_V4 import NEII_controller
+from flask import render_template, request, jsonify, redirect, url_for, session, Blueprint
+from src.config.constants import SRC_PATH, NSC_PORT, TEMPLATES_PATH
+from src.realizer.ixia.helpers.NEII_V4 import NEII_controller
+from flask import current_app
 
-# app =Flask(__name__)
 gui_bp = Blueprint('gui', __name__, template_folder=os.path.join(SRC_PATH, 'webui', 'templates'), static_folder=os.path.join(SRC_PATH, 'webui', 'static'), static_url_path='/webui/static')
 
 #Variables for dev accessing
@@ -32,6 +31,15 @@ PASSWORD = 'admin'
 enter=False
 
 def __safe_int(value):
+    """
+    Safely convert a string or numeric input to int or float.
+    
+    Args:
+        value (str|int|float): The input value to convert.
+    
+    Returns:
+        int|float|None: The converted integer or float value, or None if conversion fails.
+    """
     try:
         if isinstance(value, str):
             value = value.strip().replace(',', '.')
@@ -41,9 +49,13 @@ def __safe_int(value):
         return None
 
 def __build_request_ietf(src_node_ip=None, dst_node_ip=None, vlan_id=None, bandwidth=None, latency=None, tolerance=0, latency_version=None, reliability=None):
-    '''
-    Work: Build the IETF template for the intent
-    '''
+    """
+    Build an IETF-compliant network slice request form from inputs.
+    
+    Args: IPs, VLAN, bandwidth, latency, reliability, etc.
+
+    Returns: dict representing the JSON request.
+    """
     # Open and read the template file
     with open(os.path.join(TEMPLATES_PATH, 'ietf_template_empty.json'), 'r') as source:
         # Clean up the JSON template
@@ -109,9 +121,11 @@ def __build_request(ip_version=None, src_node_ip=None, dst_node_ip=None, src_nod
                     reliability=None, packet_reorder=None, num_pack=None, pack_reorder=None, num_reorder=None,
                     max_reorder=None, desv_reorder=None, drop_version=None, packets_drop=None,
                     drops=None, desv_drop=None):
-    '''
-    Work: Build the template for the IXIA NEII
-    '''
+    """
+    Build a JSON request form from inputs.
+    Args: IPs, VLAN, bandwidth, latency, reliability, etc.
+    Returns: dict representing the JSON request.
+    """
     json_data = {
         "ip_version": ip_version,
         "src_node_ip": src_node_ip,
@@ -138,10 +152,14 @@ def __build_request(ip_version=None, src_node_ip=None, dst_node_ip=None, src_nod
     return json_data
 
 def __datos_json():
+    """
+    Read slice data from JSON file and return as a pandas DataFrame.
+    Returns:
+        pd.DataFrame: DataFrame containing slice data.
+    """
     try:
         with open(os.path.join(SRC_PATH, 'slice_ddbb.json'), 'r') as fichero:
             datos =json.load(fichero)
-            print(datos)
             rows =[]
             for source_ip, source_info in datos["source"].items():
                 vlan = source_info["vlan"]
@@ -165,10 +183,8 @@ def home():
     session['enter'] = False
     # Leer las IPs actuales del archivo de configuración
     try:
-        with open(os.path.join(SRC_PATH, 'IPs.json')) as f:
-            ips = json.load(f)
-            tfs_ip = ips.get('TFS_IP', 'No configurada')
-            ixia_ip = ips.get('IXIA_IP', 'No configurada')
+        tfs_ip = current_app.config["TFS_IP"]
+        ixia_ip = current_app.config["IXIA_IP"]
     except Exception:
         tfs_ip = 'No configurada'
         ixia_ip = 'No configurada'
@@ -292,7 +308,7 @@ def develop():
 
         json_data = __build_request(ip_version=ip_version, src_node_ip=src_node_ipv4, dst_node_ip=dst_node_ipv4, src_node_ipv6=src_node_ipv6, dst_node_ipv6=dst_node_ipv6, vlan_id=vlan_id, latency=latency, bandwidth=bandwidth, latency_version=latency_version, tolerance=tolerance, packet_reorder=packet_reorder, num_pack=num_pack, pack_reorder=pack_reorder, num_reorder=num_reorder, max_reorder=max_reorder, desv_reorder=desv_reorder, drop_version=drop_version, packets_drop=packets_drop, drops=drops, desv_drop=desv_drop)
         logging.debug("Generated JSON data: %s", json_data)
-        if not DUMMY_MODE:
+        if not current_app.config["DUMMY_MODE"]:
             NEII_controller().nscNEII(json_data)
 
         session['enter'] = True
@@ -350,14 +366,14 @@ def search():
         response.raise_for_status()
         ixia_slices = response.json()
 
-        # Combinar los slices de TFS e IXIA
+        # Combine slices from both controllers
         slices = tfs_slices + ixia_slices
        
     except requests.RequestException as e:
         logging.error("Error fetching slices: %s", e)
         return render_template('search.html', error="No se pudieron obtener los slices.", dataframe_html="")
 
-    # Extraer datos relevantes y construir un DataFrame
+    # Extract relevant data for DataFrame
     rows = []
     for item in slices:
         try:
@@ -370,7 +386,7 @@ def search():
             vlan = sdp[0]["service-match-criteria"]["match-criterion"][0]["value"]
             controller = item["controller"]
 
-            # Construir atributos dinámicamente
+            # Build attributes list
             attributes = []
             for metric in metric_bound:
                 if metric.get("metric-type", "") == "one-way-bandwidth":
@@ -454,21 +470,25 @@ def update_ips():
     tfs_ip = data.get('tfs_ip')
     ixia_ip = data.get('ixia_ip')
 
-    # Cargar datos existentes si el archivo existe
+    # Load existing IPs from the configuration file
     config_path = os.path.join(SRC_PATH, 'IPs.json')
     if os.path.exists(config_path):
         with open(config_path) as f:
             ips = json.load(f)
+        ips = {
+            "TFS_IP": current_app.config["TFS_IP"],
+            "IXIA_IP": current_app.config["IXIA_IP"]
+        }
     else:
         ips = {"TFS_IP": "", "IXIA_IP": ""}
 
-    # Actualizar solo los campos recibidos
+    # Update IPs if provided
     if tfs_ip:
         ips['TFS_IP'] = tfs_ip
     if ixia_ip:
         ips['IXIA_IP'] = ixia_ip
 
-    # Guardar de nuevo el archivo con los valores actualizados
+    # Save updated IPs back to the file
     with open(config_path, 'w') as f:
         json.dump(ips, f, indent=4)
 
diff --git a/swagger/E2E_namespace.py b/swagger/E2E_namespace.py
new file mode 100644
index 0000000000000000000000000000000000000000..a53cd0de84a58344d9a5401a16bd04739179a6cf
--- /dev/null
+++ b/swagger/E2E_namespace.py
@@ -0,0 +1,153 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (E2E) (https://E2E.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
+from flask import request
+from flask_restx import Namespace, Resource, reqparse
+from src.main import NSController
+from src.api.main import Api
+import json
+from swagger.models.create_models import create_gpp_nrm_28541_model, create_ietf_network_slice_nbi_yang_model
+
+e2e_ns = Namespace(
+    "E2E",
+    description="Operations related to transport network slices with E2E Orchestrator"
+)
+
+
+# 3GPP NRM TS28.541 Data models
+gpp_network_slice_request_model = create_gpp_nrm_28541_model(e2e_ns)
+
+# IETF draft-ietf-teas-ietf-network-slice-nbi-yang Data models
+
+slice_ddbb_model, slice_response_model = create_ietf_network_slice_nbi_yang_model(e2e_ns)
+
+upload_parser = reqparse.RequestParser()
+upload_parser.add_argument('file', location='files', type='FileStorage', help="File to upload")
+upload_parser.add_argument('json_data', location='form', help="JSON Data in string format")
+
+# Namespace Controllers
@e2e_ns.route("/slice")
class E2ESliceList(Resource):
    """Collection resource for E2E transport network slices (list/create/delete-all)."""

    @e2e_ns.doc(summary="Return all transport network slices", description="Returns all transport network slices from the slice controller.")
    @e2e_ns.response(200, "Slices returned", slice_ddbb_model)
    @e2e_ns.response(404, "Transport network slices not found")
    @e2e_ns.response(500, "Internal server error")
    def get(self):
        """Retrieve all slices"""
        controller = NSController(controller_type="E2E")
        data, code = Api(controller).get_flows()
        return data, code

    @e2e_ns.doc(summary="Submit a transport network slice request", description="This endpoint allows clients to submit transport network slice requests using a JSON payload.")
    @e2e_ns.response(201,"Slice created successfully", slice_response_model)
    @e2e_ns.response(200, "No service to process.")
    @e2e_ns.response(400, "Invalid request format")
    @e2e_ns.response(500, "Internal server error")
    @e2e_ns.expect(upload_parser)
    def post(self):
        """Submit a new slice request with a file"""

        json_data = None

        # Prefer JSON from an uploaded .json file, if any.
        uploaded_file = request.files.get('file')
        if uploaded_file:
            if not uploaded_file.filename.endswith('.json'):
                return {
                    "success": False,
                    "data": None,
                    "error": "Only JSON files allowed"
                }, 400

            try:
                json_data = json.load(uploaded_file)  # Convert file to JSON
            except json.JSONDecodeError:
                return {
                    "success": False,
                    "data": None,
                    "error": "JSON file not valid"
                }, 400

        # Fall back to the 'json_data' form field (JSON as a string).
        if json_data is None:
            raw_json = request.form.get('json_data')
            if raw_json:
                try:
                    json_data = json.loads(raw_json)  # Convert string to JSON
                except json.JSONDecodeError:
                    return {
                        "success": False,
                        "data": None,
                        "error": "JSON file not valid"
                    }, 400

        # Neither a file nor form data was supplied.
        if json_data is None:
            return {
                    "success": False,
                    "data": None,
                    "error": "No data sent"
                }, 400

        # Process the JSON data with the NSController
        controller = NSController(controller_type="E2E")
        data, code = Api(controller).add_flow(json_data)
        return data, code

    @e2e_ns.doc(summary="Delete all transport network slices", description="Deletes all transport network slices from the slice controller.")
    @e2e_ns.response(204, "All transport network slices deleted successfully.")
    @e2e_ns.response(500, "Internal server error")
    def delete(self):
        """Delete all slices"""
        controller = NSController(controller_type="E2E")
        data, code = Api(controller).delete_flows()
        return data, code
+
+
# Fix: the route lacked the slice_id URL converter, so Flask could never
# supply the `slice_id` argument the methods below require.
@e2e_ns.route("/slice/<string:slice_id>")
@e2e_ns.doc(params={"slice_id": "The ID of the slice to retrieve or modify"})
class E2ESlice(Resource):
    """Item resource for a single E2E transport network slice (get/delete/modify)."""

    @e2e_ns.doc(summary="Return a specific transport network slice", description="Returns specific information related to a slice by providing its id")
    @e2e_ns.response(200, "Slice returned", slice_ddbb_model)
    @e2e_ns.response(404, "Transport network slice not found.")
    @e2e_ns.response(500, "Internal server error")
    def get(self, slice_id):
        """Retrieve a specific slice"""
        controller = NSController(controller_type="E2E")
        data, code = Api(controller).get_flows(slice_id)
        return data, code

    @e2e_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
    @e2e_ns.response(204, "Transport network slice deleted successfully.")
    @e2e_ns.response(404, "Transport network slice not found.")
    @e2e_ns.response(500, "Internal server error")
    def delete(self, slice_id):
        """Delete a slice"""
        controller = NSController(controller_type="E2E")
        data, code = Api(controller).delete_flows(slice_id)
        return data, code

    @e2e_ns.expect(slice_ddbb_model, validate=True)
    @e2e_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
    @e2e_ns.response(200, "Slice modified", slice_response_model)
    @e2e_ns.response(404, "Transport network slice not found.")
    @e2e_ns.response(500, "Internal server error")
    def put(self, slice_id):
        """Modify a slice"""
        json_data = request.get_json()
        controller = NSController(controller_type="E2E")
        data, code = Api(controller).modify_flow(slice_id, json_data)
        return data, code
diff --git a/swagger/ixia_namespace.py b/swagger/ixia_namespace.py
index 6a14ffe995ad0fb2228c2d8deaf6dd91060ea510..e8f4ac9c04f97c832f78828a48ec0a637b675475 100644
--- a/swagger/ixia_namespace.py
+++ b/swagger/ixia_namespace.py
@@ -1,6 +1,23 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is an original contribution from Telefonica Innovación Digital S.L.
+
 from flask import request
-from flask_restx import Namespace, Resource, fields, reqparse
-from src.network_slice_controller import NSController
+from flask_restx import Namespace, Resource, reqparse
+from src.main import NSController
+from src.api.main import Api
 import json
 from swagger.models.create_models import create_gpp_nrm_28541_model, create_ietf_network_slice_nbi_yang_model
 
@@ -13,13 +30,13 @@ ixia_ns = Namespace(
 gpp_network_slice_request_model = create_gpp_nrm_28541_model(ixia_ns)
 
 # IETF draft-ietf-teas-ietf-network-slice-nbi-yang Data models
-
 slice_ddbb_model, slice_response_model = create_ietf_network_slice_nbi_yang_model(ixia_ns)
 
 upload_parser = reqparse.RequestParser()
 upload_parser.add_argument('file', location='files', type='FileStorage', help="Archivo a subir")
 upload_parser.add_argument('json_data', location='form', help="Datos JSON en formato string")
 
+
 # Namespace Controllers
 @ixia_ns.route("/slice")
 class IxiaSliceList(Resource):
@@ -30,53 +47,51 @@ class IxiaSliceList(Resource):
     def get(self):
         """Retrieve all slices"""
         controller = NSController(controller_type="IXIA")
-        return controller.get_flows()
+        data, code = Api(controller).get_flows()
+        return data, code
     
     @ixia_ns.doc(summary="Submit a transport network slice request", description="This endpoint allows clients to submit transport network slice requests using a JSON payload.")
-    @ixia_ns.response(200, "Slice request successfully processed", slice_response_model)
+    @ixia_ns.response(201, "Slice created successfully", slice_response_model)
+    @ixia_ns.response(200, "No service to process.")
     @ixia_ns.response(400, "Invalid request format")
     @ixia_ns.response(500, "Internal server error")
     @ixia_ns.expect(upload_parser)
     def post(self):
         """Submit a new slice request with a file"""
-
         json_data = None
 
-        # Try to get the JSON data from the uploaded file
         uploaded_file = request.files.get('file')
         if uploaded_file:
             if not uploaded_file.filename.endswith('.json'):
-                return {"error": "Only JSON files allowed"}, 400
-            
+                return {"success": False, "data": None, "error": "Only JSON files allowed"}, 400
             try:
-                json_data = json.load(uploaded_file)  # Convert file to JSON
+                json_data = json.load(uploaded_file)
             except json.JSONDecodeError:
-                return {"error": "JSON file not valid"}, 400
+                return {"success": False, "data": None, "error": "JSON file not valid"}, 400
 
-        # If no file was uploaded, try to get the JSON data from the form
         if json_data is None:
             raw_json = request.form.get('json_data')
             if raw_json:
                 try:
-                    json_data = json.loads(raw_json)  # Convert string to JSON
+                    json_data = json.loads(raw_json)
                 except json.JSONDecodeError:
-                    return {"error": "JSON file not valid"}, 400
-        
-        # If no JSON data was found, return an error
+                    return {"success": False, "data": None, "error": "JSON file not valid"}, 400
+
         if json_data is None:
-            return {"error": "No data sent"}, 400
+            return {"success": False, "data": None, "error": "No data sent"}, 400
 
-        # Process the JSON data with the NSController
         controller = NSController(controller_type="IXIA")
-        return controller.add_flow(json_data)
+        data, code = Api(controller).add_flow(json_data)
+        return data, code
     
     @ixia_ns.doc(summary="Delete all transport network slices", description="Deletes all transport network slices from the slice controller.")
-    @ixia_ns.response(200, "All transport network slices deleted successfully.")
+    @ixia_ns.response(204, "All transport network slices deleted successfully.")
     @ixia_ns.response(500, "Internal server error")
     def delete(self):
         """Delete all slices"""
         controller = NSController(controller_type="IXIA")
-        return controller.delete_flows()
+        data, code = Api(controller).delete_flows()
+        return data, code
 
 
 @ixia_ns.route("/slice/")
@@ -89,16 +104,18 @@ class IxiaSlice(Resource):
     def get(self, slice_id):
         """Retrieve a specific slice"""
         controller = NSController(controller_type="IXIA")
-        return controller.get_flows(slice_id)
+        data, code = Api(controller).get_flows(slice_id)
+        return data, code
 
     @ixia_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
-    @ixia_ns.response(200, "Transport network slice deleted successfully.")
+    @ixia_ns.response(204, "Transport network slice deleted successfully.")
     @ixia_ns.response(404, "Transport network slice not found.")
     @ixia_ns.response(500, "Internal server error")
     def delete(self, slice_id):
         """Delete a slice"""
         controller = NSController(controller_type="IXIA")
-        return controller.delete_flows(slice_id)
+        data, code = Api(controller).delete_flows(slice_id)
+        return data, code
 
     @ixia_ns.expect(slice_ddbb_model, validate=True)
     @ixia_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
@@ -109,4 +126,5 @@ class IxiaSlice(Resource):
         """Modify a slice"""
         json_data = request.get_json()
         controller = NSController(controller_type="IXIA")
-        return controller.modify_flow(slice_id, json_data)
\ No newline at end of file
+        data, code = Api(controller).modify_flow(slice_id, json_data)
+        return data, code
\ No newline at end of file
diff --git a/swagger/models/create_models.py b/swagger/models/create_models.py
index 94ca83bc53b978beb68512dd5959452375256f67..a1f7d3a94758b1ea3efe10a41271fead6b8ab6c7 100644
--- a/swagger/models/create_models.py
+++ b/swagger/models/create_models.py
@@ -293,34 +293,50 @@ def create_ietf_network_slice_nbi_yang_model(slice_ns):
 
     slice_ddbb_model = slice_ns.model('ddbb_model', {
         'slice_id': fields.String(),
-        'intent': fields.List(fields.Nested(ietf_network_slice_request_model))
+        'intent': fields.List(fields.Nested(ietf_network_slice_request_model)),
+        'controller': fields.String()
     })
 
 
     slice_response_model = slice_ns.model(
         "SliceResponse",
         {
-            "status": fields.String(description="Status of the request", example="success"),
-            "slices": fields.List(
-                fields.Nested(
-                    slice_ns.model(
-                        "SliceDetails",
-                        {
-                            "id": fields.String(description="Slice ID", example="CU-UP1_DU1"),
-                            "source": fields.String(description="Source IP", example="100.2.1.2"),
-                            "destination": fields.String(description="Destination IP", example="100.1.1.2"),
-                            "vlan": fields.String(description="VLAN ID", example="100"),
-                            "bandwidth(Mbps)": fields.Integer(
-                                description="Bandwidth in Mbps", example=120
-                            ),
-                            "latency(ms)": fields.Integer(
-                                description="Latency in milliseconds", example=4
+            "success": fields.Boolean(description="Indicates if the request was successful", example=True),
+            "data": fields.Nested(
+                slice_ns.model(
+                    "SliceData",
+                    {
+                        "slices": fields.List(
+                            fields.Nested(
+                                slice_ns.model(
+                                    "SliceDetails",
+                                    {
+                                        "id": fields.String(description="Slice ID", example="slice-service-11327140-7361-41b3-aa45-e84a7fb40be9"),
+                                        "source": fields.String(description="Source IP", example="10.60.11.3"),
+                                        "destination": fields.String(description="Destination IP", example="10.60.60.105"),
+                                        "vlan": fields.String(description="VLAN ID", example="100"),
+                                        "requirements": fields.List(
+                                            fields.Nested(
+                                                slice_ns.model(
+                                                    "SliceRequirement",
+                                                    {
+                                                        "constraint_type": fields.String(description="Type of constraint", example="one-way-bandwidth[kbps]"),
+                                                        "constraint_value": fields.String(description="Constraint value", example="2000")
+                                                    }
+                                                )
+                                            ),
+                                            description="List of requirements for the slice"
+                                        )
+                                    }
+                                )
                             ),
-                        },
-                    )
-                ),
-                description="List of slices",
+                            description="List of slices"
+                        ),
+                        "setup_time": fields.Float(description="Slice setup time in milliseconds", example=12.57),
+                    }
+                )
             ),
-        },
+            "error": fields.String(description="Error message if request failed", example=None)
+        }
     )
     return slice_ddbb_model, slice_response_model
\ No newline at end of file
diff --git a/swagger/tfs_namespace.py b/swagger/tfs_namespace.py
index c9c3e07f591d13390df92712a746843a8d2326bd..09163602aeca9bb14521b272bcfa980093a9c5f2 100644
--- a/swagger/tfs_namespace.py
+++ b/swagger/tfs_namespace.py
@@ -16,7 +16,8 @@
 
 from flask import request
 from flask_restx import Namespace, Resource, fields, reqparse
-from src.network_slice_controller import NSController
+from src.main import NSController
+from src.api.main import Api
 import json
 from swagger.models.create_models import create_gpp_nrm_28541_model, create_ietf_network_slice_nbi_yang_model
 
@@ -33,8 +34,8 @@ gpp_network_slice_request_model = create_gpp_nrm_28541_model(tfs_ns)
 slice_ddbb_model, slice_response_model = create_ietf_network_slice_nbi_yang_model(tfs_ns)
 
 upload_parser = reqparse.RequestParser()
-upload_parser.add_argument('file', location='files', type='FileStorage', help="Archivo a subir")
-upload_parser.add_argument('json_data', location='form', help="Datos JSON en formato string")
+upload_parser.add_argument('file', location='files', type='FileStorage', help="File to upload")
+upload_parser.add_argument('json_data', location='form', help="JSON Data in string format")
 
 # Namespace Controllers
 @tfs_ns.route("/slice")
@@ -46,10 +47,12 @@ class TfsSliceList(Resource):
     def get(self):
         """Retrieve all slices"""
         controller = NSController(controller_type="TFS")
-        return controller.get_flows()
+        data, code = Api(controller).get_flows()
+        return data, code
     
     @tfs_ns.doc(summary="Submit a transport network slice request", description="This endpoint allows clients to submit transport network slice requests using a JSON payload.")
-    @tfs_ns.response(200, "Slice request successfully processed", slice_response_model)
+    @tfs_ns.response(201, "Slice created successfully", slice_response_model)
+    @tfs_ns.response(200, "No service to process.")
     @tfs_ns.response(400, "Invalid request format")
     @tfs_ns.response(500, "Internal server error")
     @tfs_ns.expect(upload_parser)
@@ -62,12 +65,20 @@ class TfsSliceList(Resource):
         uploaded_file = request.files.get('file')
         if uploaded_file:
             if not uploaded_file.filename.endswith('.json'):
-                return {"error": "Only JSON files allowed"}, 400
+                return {
+                    "success": False,
+                    "data": None,
+                    "error": "Only JSON files allowed"
+                }, 400
             
             try:
                 json_data = json.load(uploaded_file)  # Convert file to JSON
             except json.JSONDecodeError:
-                return {"error": "JSON file not valid"}, 400
+                return {
+                    "success": False,
+                    "data": None,
+                    "error": "JSON file not valid"
+                }, 400
 
         # If no file was uploaded, try to get the JSON data from the form
         if json_data is None:
@@ -76,23 +87,33 @@ class TfsSliceList(Resource):
                 try:
                     json_data = json.loads(raw_json)  # Convert string to JSON
                 except json.JSONDecodeError:
-                    return {"error": "JSON file not valid"}, 400
+                    return {
+                        "success": False,
+                        "data": None,
+                        "error": "JSON file not valid"
+                    }, 400
         
         # If no JSON data was found, return an error
         if json_data is None:
-            return {"error": "No data sent"}, 400
+            return {
+                    "success": False,
+                    "data": None,
+                    "error": "No data sent"
+                }, 400
 
         # Process the JSON data with the NSController
         controller = NSController(controller_type="TFS")
-        return controller.add_flow(json_data)
+        data, code = Api(controller).add_flow(json_data)
+        return data, code
     
     @tfs_ns.doc(summary="Delete all transport network slices", description="Deletes all transport network slices from the slice controller.")
-    @tfs_ns.response(200, "All transport network slices deleted successfully.")
+    @tfs_ns.response(204, "All transport network slices deleted successfully.")
     @tfs_ns.response(500, "Internal server error")
     def delete(self):
         """Delete all slices"""
         controller = NSController(controller_type="TFS")
-        return controller.delete_flows()
+        data, code = Api(controller).delete_flows()
+        return data, code
 
 
 @tfs_ns.route("/slice/")
@@ -105,26 +126,29 @@ class TfsSlice(Resource):
     def get(self, slice_id):
         """Retrieve a specific slice"""
         controller = NSController(controller_type="TFS")
-        return controller.get_flows(slice_id)
+        data, code = Api(controller).get_flows(slice_id)
+        return data, code
 
     @tfs_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
-    @tfs_ns.response(200, "Transport network slice deleted successfully.")
+    @tfs_ns.response(204, "Transport network slice deleted successfully.")
     @tfs_ns.response(404, "Transport network slice not found.")
     @tfs_ns.response(500, "Internal server error")
     def delete(self, slice_id):
         """Delete a slice"""
         controller = NSController(controller_type="TFS")
-        return controller.delete_flows(slice_id)
+        data, code = Api(controller).delete_flows(slice_id)
+        return data, code
 
     @tfs_ns.expect(slice_ddbb_model, validate=True)
     @tfs_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
-    @tfs_ns.response(200, "Slice modified", slice_ddbb_model)
+    @tfs_ns.response(200, "Slice modified", slice_response_model)
     @tfs_ns.response(404, "Transport network slice not found.")
     @tfs_ns.response(500, "Internal server error")
     def put(self, slice_id):
         """Modify a slice"""
         json_data = request.get_json()
         controller = NSController(controller_type="TFS")
-        return controller.modify_flow(slice_id, json_data)
+        data, code = Api(controller).modify_flow(slice_id, json_data)
+        return data, code