diff --git a/app.py b/app.py
index 61503b3b4600483708559bac25bee7cb4588a2a6..d2ed0617fce0c3400dce48682bec83b5b294340f 100644
--- a/app.py
+++ b/app.py
@@ -1,49 +1,47 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file is an original contribution from Telefonica Innovación Digital S.L.
-
-import os
+import logging
from flask import Flask
from flask_restx import Api
from flask_cors import CORS
from swagger.tfs_namespace import tfs_ns
from swagger.ixia_namespace import ixia_ns
-from src.Constants import NSC_PORT, WEBUI_DEPLOY
+from src.config.constants import NSC_PORT
from src.webui.gui import gui_bp
+from src.config.config import create_config
+
+
+def create_app():
+ """Factory para crear la app Flask con la configuración cargada"""
+ app = Flask(__name__)
+ app = create_config(app)
+ CORS(app)
+
+ # Configure logging to provide clear and informative log messages
+ logging.basicConfig(
+ level=app.config["LOGGING_LEVEL"],
+ format="%(levelname)s - %(message)s"
+ )
-app = Flask(__name__)
-CORS(app)
+ # Create API instance
+ api = Api(
+ app,
+ version="1.0",
+ title="Network Slice Controller (NSC) API",
+ description="API for orchestrating and realizing transport network slice requests",
+ doc="/nsc" # Swagger UI URL
+ )
-# Create API instance
-api = Api(
- app,
- version="1.0",
- title="Network Slice Controller (NSC) API",
- description="API for orchestrating and realizing transport network slice requests",
- doc="/nsc" # Swagger UI URL
-)
+ # Register namespaces
+ api.add_namespace(tfs_ns, path="/tfs")
+ api.add_namespace(ixia_ns, path="/ixia")
-# Register namespaces
-api.add_namespace(tfs_ns, path="/tfs")
-api.add_namespace(ixia_ns, path="/ixia")
-#gui_bp = Blueprint('gui', __name__, template_folder='templates')
+ if app.config["WEBUI_DEPLOY"]:
+ app.secret_key = "clave-secreta-dev"
+ app.register_blueprint(gui_bp)
-if WEBUI_DEPLOY:
- app.secret_key = 'clave-secreta-dev'
- app.register_blueprint(gui_bp)
+ return app
+# Only start the server when the script is executed directly
if __name__ == "__main__":
+ app = create_app()
app.run(host="0.0.0.0", port=NSC_PORT, debug=True)
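For context, a minimal usage sketch of the new factory (assuming the module layout introduced in this patch): the factory lets tests and WSGI servers build an isolated app instance instead of importing a module-level one. Recent gunicorn versions can call it directly with gunicorn "app:create_app()".

    from app import create_app

    app = create_app()

    def test_swagger_ui_is_reachable():
        # /nsc is the Swagger UI path configured in create_app()
        client = app.test_client()
        assert client.get("/nsc").status_code == 200
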
diff --git a/src/Constants.py b/src/Constants.py
deleted file mode 100644
index 3b02ffd287c6eced608c00b993a71605fe53d0d4..0000000000000000000000000000000000000000
--- a/src/Constants.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file includes original contributions from Telefonica Innovación Digital S.L.
-
-import logging, os, json
-
-# Default logging level
-DEFAULT_LOGGING_LEVEL = logging.INFO
-
-# Default port for NSC deployment
-NSC_PORT = 8081
-
-# Paths
-# Obtain the absolute path of the current file
-SRC_PATH = os.path.dirname(os.path.abspath(__file__))
-with open(os.path.join(SRC_PATH, 'IPs.json')) as f:
- ips = json.load(f)
-
-# Create the path to the desired file relative to the current file
-TEMPLATES_PATH = os.path.join(SRC_PATH, "templates")
-
-# Dump templates
-DUMP_TEMPLATES = False
-
-# Mapper
-
-# Flag to determine if the NSC performs NRPs
-NRP_ENABLED = False
-# Planner Flags
-PLANNER_ENABLED = True
-# Flag to determine if external PCE is used
-PCE_EXTERNAL = False
-
-# Realizer
-
-# Controller Flags
-# If True, config is not sent to controllers
-DUMMY_MODE = False
-
-#####TERAFLOW#####
-# Teraflow IP
-TFS_IP = ips.get('TFS_IP')
-UPLOAD_TYPE = "WEBUI" # "WEBUI" or "NBI"
-NBI_L2_PATH = "restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services"
-NBI_L3_PATH = "restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services"
-# Flag to determine if additional L2VPN configuration support is required for deploying L2VPNs with path selection
-TFS_L2VPN_SUPPORT = False
-
-#####IXIA#####
-# IXIA NEII IP
-IXIA_IP = ips.get('IXIA_IP')
-
-# WebUI
-
-# Flag to deploy the WebUI
-WEBUI_DEPLOY = True
\ No newline at end of file
diff --git a/src/api/main.py b/src/api/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..56c922b20a279209b469b48886a5d121bd37a874
--- /dev/null
+++ b/src/api/main.py
@@ -0,0 +1,193 @@
+from src.config.constants import DATABASE_PATH
+from src.utils.send_response import send_response
+import os, json, logging
+from flask import current_app
+
+class Api:
+ def __init__(self, slice_service):
+ self.slice_service = slice_service
+
+ def add_flow(self, intent):
+ """
+ Create a new transport network slice.
+
+ Args:
+ intent (dict): Network slice intent in 3GPP or IETF format
+
+ Returns:
+ Result of the Network Slice Controller (NSC) operation
+
+ API Endpoint:
+ POST /slice
+
+ Raises:
+ ValueError: If no transport network slices are found
+ Exception: For unexpected errors during slice creation process
+ """
+ try:
+ result = self.slice_service.nsc(intent)
+ if not result:
+ return send_response(False, code=404, message="No intents found")
+
+ return send_response(
+ True,
+ code=201,
+ data=result
+ )
+ except Exception as e:
+ # Handle unexpected errors
+ return send_response(False, code=500, message=str(e))
+
+ def get_flows(self, slice_id=None):
+ """
+ Retrieve transport network slice information.
+
+ This method allows retrieving:
+ - All transport network slices
+ - A specific slice by its ID
+
+ Args:
+ slice_id (str, optional): Unique identifier of a specific slice.
+ Defaults to None.
+
+ Returns:
+ dict or list:
+ - If slice_id is provided: Returns the specific slice details
+ - If slice_id is None: Returns a list of all slices
+ - Returns an error response if no slices are found
+
+ API Endpoint:
+ GET /slice/{id}
+
+ Raises:
+ ValueError: If no transport network slices are found
+ Exception: For unexpected errors during file processing
+ """
+ try:
+ # Read slice database from JSON file
+ with open(os.path.join(DATABASE_PATH, "slice_ddbb.json"), 'r') as file:
+ content = json.load(file)
+ # If specific slice ID is provided, find and return matching slice
+ if slice_id:
+ for slice in content:
+ if slice["slice_id"] == slice_id:
+ return slice, 200
+ raise ValueError("Transport network slices not found")
+ # If no slices exist, raise an error
+ if len(content) == 0:
+ raise ValueError("Transport network slices not found")
+
+ # Return all slices if no specific ID is given
+ return [slice for slice in content if slice.get("controller") == self.slice_service.controller_type], 200
+
+ except ValueError as e:
+ # Handle case where no slices are found
+ return send_response(False, code=404, message=str(e))
+ except Exception as e:
+ # Handle unexpected errors
+ return send_response(False, code=500, message=str(e))
+
+ def modify_flow(self, slice_id, intent):
+ """
+ Modify an existing transport network slice.
+
+ Args:
+ slice_id (str): Unique identifier of the slice to modify
+ intent (dict): New intent configuration for the slice
+
+ Returns:
+ Result of the Network Slice Controller (NSC) operation
+
+ API Endpoint:
+ PUT /slice/{id}
+ """
+ try:
+ result = self.slice_service.nsc(intent, slice_id)
+ if not result:
+ return send_response(False, code=404, message="Slice not found")
+
+ return send_response(
+ True,
+ code=200,
+ message="Slice modified successfully",
+ data=result
+ )
+ except Exception as e:
+ # Handle unexpected errors
+ return send_response(False, code=500, message=str(e))
+
+ def delete_flows(self, slice_id=None):
+ """
+ Delete transport network slice(s).
+
+ This method supports:
+ - Deleting a specific slice by ID
+ - Deleting all slices
+ - Optional cleanup of L2VPN configurations
+
+ Args:
+ slice_id (str, optional): Unique identifier of slice to delete.
+ Defaults to None.
+
+ Returns:
+ dict: Response indicating successful deletion or error details
+
+ API Endpoint:
+ DELETE /slice/{id}
+
+ Raises:
+ ValueError: If no slices are found to delete
+ Exception: For unexpected errors during deletion process
+
+ Notes:
+ - If controller_type is TFS, attempts to delete from Teraflow
+ - If need_l2vpn_support is True, performs additional L2VPN cleanup
+ """
+ try:
+ # Read current slice database
+ with open(os.path.join(DATABASE_PATH, "slice_ddbb.json"), 'r') as file:
+ content = json.load(file)
+ id = None
+
+ # Delete specific slice if slice_id is provided
+ if slice_id:
+ for i, slice in enumerate(content):
+ if slice["slice_id"] == slice_id and slice.get("controller") == self.slice_service.controller_type:
+ del content[i]
+ id = i
+ break
+ # Raise error if slice not found
+ if id is None:
+ raise ValueError("Transport network slice not found")
+ # Update slice database
+ with open(os.path.join(DATABASE_PATH, "slice_ddbb.json"), 'w') as file:
+ json.dump(content, file, indent=4)
+ logging.info(f"Slice {slice_id} removed successfully")
+ return {}, 204
+
+ # Delete all slices
+ else:
+ # Optional: Delete in Teraflow if configured
+ if self.slice_service.controller_type == "TFS":
+ # TODO: should send a delete request to Teraflow
+ if current_app.config["TFS_L2VPN_SUPPORT"]:
+ self.slice_service.tfs_l2vpn_delete()
+
+ data_removed = [slice for slice in content if slice.get("controller") == self.slice_service.controller_type]
+
+ # Verify slices exist before deletion
+ if len(data_removed) == 0:
+ raise ValueError("Transport network slices not found")
+
+ filtered_data = [slice for slice in content if slice.get("controller") != self.slice_service.controller_type]
+ # Clear slice database
+ with open(os.path.join(DATABASE_PATH, "slice_ddbb.json"), 'w') as file:
+ json.dump(filtered_data, file, indent=4)
+
+ logging.info("All slices removed successfully")
+ return {}, 204
+
+ except ValueError as e:
+ return send_response(False, code=404, message=str(e))
+ except Exception as e:
+ return send_response(False, code=500, message=str(e))
\ No newline at end of file
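The error handling above funnels everything through send_response() from src/utils/send_response.py, which is not part of this hunk. A hypothetical sketch of such a helper, inferred only from the call sites above (the signature and field names are assumptions, not the actual implementation):

    def send_response(result, status=None, message=None, code=None, data=None):
        """Assumed helper: build a (body, status_code) tuple for flask-restx."""
        body = {"status": status or ("success" if result else "error"), "code": code}
        if message is not None:
            body["message"] = message
        if data is not None:
            body["data"] = data
        return body, code
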
diff --git a/src/config/.env.example b/src/config/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..e525ebd31b92ceb568f2df25170718fd0be14e35
--- /dev/null
+++ b/src/config/.env.example
@@ -0,0 +1,39 @@
+# -------------------------
+# General
+# -------------------------
+LOGGING_LEVEL=INFO # Options: CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+DUMP_TEMPLATES=false
+
+# -------------------------
+# Mapper
+# -------------------------
+# Flag to determine if the NSC performs NRPs
+NRP_ENABLED=false
+# Planner Flags
+PLANNER_ENABLED=true
+# Flag to determine if external PCE is used
+PCE_EXTERNAL=false
+
+# -------------------------
+# Realizer
+# -------------------------
+# If true, no config sent to controllers
+DUMMY_MODE=true
+
+# -------------------------
+# Teraflow
+# -------------------------
+TFS_IP=127.0.0.1
+UPLOAD_TYPE=WEBUI # Options: WEBUI or NBI
+# Flag to determine if additional L2VPN configuration support is required for deploying L2VPNs with path selection
+TFS_L2VPN_SUPPORT=false
+
+# -------------------------
+# IXIA
+# -------------------------
+IXIA_IP=127.0.0.1
+
+# -------------------------
+# WebUI
+# -------------------------
+WEBUI_DEPLOY=true
diff --git a/src/IPs.json b/src/config/IPs.json
similarity index 100%
rename from src/IPs.json
rename to src/config/IPs.json
diff --git a/src/config/config.py b/src/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..32a13076e2f33c36e4a6592fade63088574f06b8
--- /dev/null
+++ b/src/config/config.py
@@ -0,0 +1,45 @@
+import os
+from dotenv import load_dotenv
+from flask import Flask
+import logging
+
+# Load .env file if present
+load_dotenv()
+
+LOG_LEVELS = {
+ "CRITICAL": logging.CRITICAL,
+ "ERROR": logging.ERROR,
+ "WARNING": logging.WARNING,
+ "INFO": logging.INFO,
+ "DEBUG": logging.DEBUG,
+ "NOTSET": logging.NOTSET,
+}
+
+def create_config(app: Flask):
+ """Load flags into Flask app.config"""
+ # Default logging level
+ app.config["LOGGING_LEVEL"] = LOG_LEVELS.get(os.getenv("LOGGING_LEVEL", "INFO").upper(),logging.INFO)
+
+ # Dump templates
+ app.config["DUMP_TEMPLATES"] = os.getenv("DUMP_TEMPLATES", "false").lower() == "true"
+
+ # Mapper
+ app.config["NRP_ENABLED"] = os.getenv("NRP_ENABLED", "false").lower() == "true"
+ app.config["PLANNER_ENABLED"] = os.getenv("PLANNER_ENABLED", "false").lower() == "true"
+ app.config["PCE_EXTERNAL"] = os.getenv("PCE_EXTERNAL", "false").lower() == "true"
+
+ # Realizer
+ app.config["DUMMY_MODE"] = os.getenv("DUMMY_MODE", "true").lower() == "true"
+
+ # Teraflow
+ app.config["TFS_IP"] = os.getenv("TFS_IP", "127.0.0.1")
+ app.config["UPLOAD_TYPE"] = os.getenv("UPLOAD_TYPE", "WEBUI")
+ app.config["TFS_L2VPN_SUPPORT"] = os.getenv("TFS_L2VPN_SUPPORT", "false").lower() == "true"
+
+ # IXIA
+ app.config["IXIA_IP"] = os.getenv("IXIA_IP", "127.0.0.1")
+
+ # WebUI
+ app.config["WEBUI_DEPLOY"] = os.getenv("WEBUI_DEPLOY", "false").lower() == "true"
+
+ return app
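A quick sketch of how the loaded flags are consumed elsewhere in the patch; outside of app.py the same values are read through flask.current_app.config (as src/api/main.py does for TFS_L2VPN_SUPPORT):

    from flask import Flask
    from src.config.config import create_config

    app = create_config(Flask(__name__))

    if app.config["DUMMY_MODE"]:
        print("Dry run: no configuration is pushed to the controllers")
    print("Teraflow endpoint:", app.config["TFS_IP"])
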
diff --git a/src/config/constants.py b/src/config/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb04fb4a4e8a156c91104f7f2250becfa9daacd8
--- /dev/null
+++ b/src/config/constants.py
@@ -0,0 +1,31 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file includes original contributions from Telefonica Innovación Digital S.L.
+from pathlib import Path
+
+# Default port for NSC deployment
+NSC_PORT = 8081
+
+# Paths
+BASE_DIR = Path(__file__).resolve().parent.parent.parent
+SRC_PATH = BASE_DIR / "src"
+TEMPLATES_PATH = SRC_PATH / "templates"
+DATABASE_PATH = SRC_PATH / "database"
+CONFIG_PATH = SRC_PATH / "config"
+NBI_L2_PATH = "restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services"
+NBI_L3_PATH = "restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services"
+
+
+
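The path constants are now pathlib.Path objects rather than strings. Both os.path.join() and open() accept Path arguments, so call sites such as os.path.join(DATABASE_PATH, "slice_ddbb.json") in src/api/main.py keep working; a small sketch of the two interchangeable spellings:

    import json, os
    from src.config.constants import DATABASE_PATH

    legacy_style = os.path.join(DATABASE_PATH, "slice_ddbb.json")   # str
    pathlib_style = DATABASE_PATH / "slice_ddbb.json"               # pathlib.Path

    assert str(pathlib_style) == legacy_style
    with open(pathlib_style) as f:
        slices = json.load(f)
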
diff --git a/src/nrp_ddbb.json b/src/database/nrp_ddbb.json
similarity index 91%
rename from src/nrp_ddbb.json
rename to src/database/nrp_ddbb.json
index 948967ef9fd1a9389ac634b19255857a5e13d3aa..1616438516aabb21393b339ee45bf7dc637803c2 100644
--- a/src/nrp_ddbb.json
+++ b/src/database/nrp_ddbb.json
@@ -6,12 +6,12 @@
{
"metric-type": "one-way-bandwidth",
"metric-unit": "kbps",
- "bound": 1
+ "bound": 100000000000
},
{
"metric-type": "one-way-delay-maximum",
"metric-unit": "milliseconds",
- "bound": 800
+ "bound": 1
}
],
"slices": ["slice-service-02873501-bf0a-4b02-8540-2f9d970ea20f", "slice-service-e3b22fa8-f3da-4da8-881b-c66e5161b4a5"],
@@ -24,12 +24,12 @@
{
"metric-type": "one-way-bandwidth",
"metric-unit": "kbps",
- "bound": 1
+ "bound": 10000000000000
},
{
"metric-type": "one-way-delay-maximum",
"metric-unit": "milliseconds",
- "bound": 800
+ "bound": 2
}
],
"slices": ["slice-service-02873501-bf0a-4b02-8540-2f9d970ea20f", "slice-service-e3b22fa8-f3da-4da8-881b-c66e5161b4a5"],
diff --git a/src/slice_ddbb.json b/src/database/slice_ddbb.json
similarity index 100%
rename from src/slice_ddbb.json
rename to src/database/slice_ddbb.json
diff --git a/src/database/store_data.py b/src/database/store_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..5040314b4c3cb67d798d3eae5f6bf81cadbe141d
--- /dev/null
+++ b/src/database/store_data.py
@@ -0,0 +1,44 @@
+import json, os
+from src.config.constants import DATABASE_PATH
+
+def store_data(intent, slice_id, controller_type=None):
+ """
+ Store network slice intent information in a JSON database file.
+
+ This method:
+ 1. Creates a JSON file if it doesn't exist
+ 2. Reads existing content
+ 3. Updates or adds new slice intent information
+
+ Args:
+ intent (dict): Network slice intent to be stored
+ slice_id (str): Existing slice ID to update, or a falsy value to add a new entry.
+ controller_type (str, optional): Controller that owns the slice (e.g. "TFS" or "IXIA"). Defaults to None.
+ """
+ file_path = os.path.join(DATABASE_PATH, "slice_ddbb.json")
+ # Create initial JSON file if it doesn't exist
+ if not os.path.exists(file_path):
+ with open(file_path, 'w') as file:
+ json.dump([], file, indent=4)
+
+ # Read existing content
+ with open(file_path, 'r') as file:
+ content = json.load(file)
+
+ # Update or add new slice intent
+ if slice_id:
+ # Update existing slice intent
+ for slice in content:
+ if slice["slice_id"] == slice_id:
+ slice["intent"] = intent
+ else:
+ # Add new slice intent
+ content.append(
+ {
+ "slice_id": intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"],
+ "intent": intent,
+ "controller": controller_type,
+ })
+
+ # Write updated content back to file
+ with open(file_path, 'w') as file:
+ json.dump(content, file, indent=4)
\ No newline at end of file
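A minimal usage sketch of store_data(), assuming the package layout in this patch; the intent carries only the field the function reads when appending a new entry, and the slice identifier is illustrative:

    from src.database.store_data import store_data

    intent = {
        "ietf-network-slice-service:network-slice-services": {
            "slice-service": [{"id": "slice-service-example-0001"}]
        }
    }
    # A falsy slice_id appends a new entry tagged with the owning controller.
    store_data(intent, slice_id=None, controller_type="TFS")
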
diff --git a/src/helpers.py b/src/helpers.py
deleted file mode 100644
index 0e150791ac742c02c03aaa755c04a980481b4336..0000000000000000000000000000000000000000
--- a/src/helpers.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file includes original contributions from Telefonica Innovación Digital S.L.
-
-import logging, requests, json
-from netmiko import ConnectHandler
-from src.Constants import DEFAULT_LOGGING_LEVEL
-
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
- level=DEFAULT_LOGGING_LEVEL,
- format='%(levelname)s - %(message)s')
-
-#Teraflow
-class tfs_connector():
-
- def webui_post(self, tfs_ip, service):
- user="admin"
- password="admin"
- token=""
- session = requests.Session()
- session.auth = (user, password)
- url=f'http://{tfs_ip}/webui'
- response=session.get(url=url)
- for item in response.iter_lines():
- if("csrf_token" in str(item)):
- string=str(item).split(' nrp_slo["bound"]:
+ return False, 0 # Does not meet minimum constraint
+ flexibility_scores.append(flexibility)
+ break # Exit inner loop after finding matching metric
+
+ # Calculate final viability score
+ score = sum(flexibility_scores) / len(flexibility_scores) if flexibility_scores else 0
+ return True, score # If all checks passed, the NRP is viable
\ No newline at end of file
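A short numeric illustration of the flexibility score computed in the fragment above (the full check also appears in the code removed from src/network_slice_controller.py later in this patch); the numbers are illustrative:

    # One matched delay metric: flexibility = (nrp_bound - slice_bound) / slice_bound,
    # and the final score is the mean over all matched metrics.
    slice_bound, nrp_bound = 1, 2        # milliseconds, as in database/nrp_ddbb.json
    flexibility = (nrp_bound - slice_bound) / slice_bound
    score = sum([flexibility]) / 1       # only one metric matched
    assert score == 1.0
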
diff --git a/src/nbi_processor/detect_format.py b/src/nbi_processor/detect_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..290e197143b177b3683f2e762ae0d75a963ebcf5
--- /dev/null
+++ b/src/nbi_processor/detect_format.py
@@ -0,0 +1,24 @@
+def detect_format(json_data):
+ """
+ Detect the format of the input network slice intent.
+
+ This method identifies whether the input JSON is in 3GPP or IETF format
+ by checking for specific keys in the JSON structure.
+
+ Args:
+ json_data (dict): Input network slice intent JSON
+
+ Returns:
+ str or None:
+ - "IETF" if IETF-specific keys are found
+ - "3GPP" if 3GPP-specific keys are found
+ - None if no recognizable format is detected
+ """
+ # Check for IETF-specific key
+ if "ietf-network-slice-service:network-slice-services" in json_data:
+ return "IETF"
+ # Check for 3GPP-specific keys
+ if any(key in json_data for key in ["NetworkSlice1", "TopSliceSubnet1", "CNSliceSubnet1", "RANSliceSubnet1"]):
+ return "3GPP"
+
+ return None
\ No newline at end of file
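A quick usage sketch of detect_format() with minimal payloads that exercise the three branches:

    from src.nbi_processor.detect_format import detect_format

    ietf_intent = {"ietf-network-slice-service:network-slice-services": {}}
    gpp_intent = {"RANSliceSubnet1": {"networkSliceSubnetRef": []}}

    assert detect_format(ietf_intent) == "IETF"
    assert detect_format(gpp_intent) == "3GPP"
    assert detect_format({"unexpected": "payload"}) is None
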
diff --git a/src/nbi_processor/main.py b/src/nbi_processor/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca1acd3db1612dd3fc6b2d9351b5abea431618bd
--- /dev/null
+++ b/src/nbi_processor/main.py
@@ -0,0 +1,41 @@
+import logging
+from .detect_format import detect_format
+from .translator import translator
+
+def nbi_processor(intent_json):
+ """
+ Process and translate network slice intents from different formats (3GPP or IETF).
+
+ This method detects the input JSON format and converts 3GPP intents to IETF format.
+ Supports multiple slice subnets in 3GPP format.
+
+ Args:
+ intent_json (dict): Input network slice intent in either 3GPP or IETF format.
+
+ Returns:
+ list: A list of IETF-formatted network slice intents.
+
+ Raises:
+ ValueError: If the JSON request format is not recognized.
+ """
+ # Detect the input JSON format (3GPP or IETF)
+ format = detect_format(intent_json)
+ ietf_intents = []
+
+ # TODO Needs to be generalized to support different names of slice subnets
+ # Process different input formats
+ if format == "3GPP":
+ # Translate each subnet in 3GPP format to IETF format
+ for subnet in intent_json["RANSliceSubnet1"]["networkSliceSubnetRef"]:
+ ietf_intents.append(translator(intent_json, subnet))
+ logging.info(f"3GPP requests translated to IETF template")
+ elif format == "IETF":
+ # If already in IETF format, add directly
+ logging.info(f"IETF intent received")
+ ietf_intents.append(intent_json)
+ else:
+ # Handle unrecognized format
+ logging.error(f"JSON request format not recognized")
+ raise ValueError("JSON request format not recognized")
+
+ return ietf_intents or None
\ No newline at end of file
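A minimal sketch of calling nbi_processor() directly (assuming the package layout in this patch): an IETF intent is returned untouched inside a list, and an unrecognized payload raises ValueError.

    from src.nbi_processor.main import nbi_processor

    ietf_intent = {"ietf-network-slice-service:network-slice-services": {}}
    assert nbi_processor(ietf_intent) == [ietf_intent]

    try:
        nbi_processor({"unexpected": "payload"})
    except ValueError as err:
        print(err)   # "JSON request format not recognized"
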
diff --git a/src/nbi_processor/translator.py b/src/nbi_processor/translator.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f1953a089e4356dae79db6b732575102389411c
--- /dev/null
+++ b/src/nbi_processor/translator.py
@@ -0,0 +1,91 @@
+import uuid, os
+from src.utils.load_template import load_template
+from src.config.constants import TEMPLATES_PATH
+
+def translator(gpp_intent, subnet):
+ """
+ Translate a 3GPP network slice intent to IETF format.
+
+ This method converts a 3GPP intent into a standardized IETF intent template,
+ mapping key parameters such as QoS profiles, service endpoints, and connection details.
+
+ Args:
+ gpp_intent (dict): Original 3GPP network slice intent
+ subnet (str): Specific subnet reference within the 3GPP intent
+
+ Returns:
+ dict: Translated IETF-formatted network slice intent
+
+ Notes:
+ - Generates a unique slice service ID using UUID
+ - Maps QoS requirements, source/destination endpoints
+ - Logs the translated intent to a JSON file for reference
+ """
+ # Load IETF template and create a copy to modify
+ ietf_i = load_template(os.path.join(TEMPLATES_PATH, "ietf_template_empty.json"))
+
+ # Extract endpoint transport objects
+ ep_transport_objects = gpp_intent[subnet]["EpTransport"]
+
+ # Populate template with SLOs (currently supporting QoS profile, latency and bandwidth)
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = gpp_intent[ep_transport_objects[0]]["qosProfile"]
+
+ profile = gpp_intent.get(subnet, {}).get("SliceProfileList", [{}])[0].get("RANSliceSubnetProfile", {})
+
+
+ metrics = {
+ ("uLThptPerSliceSubnet", "MaxThpt"): ("one-way-bandwidth", "kbps"),
+ ("uLLatency",): ("one-way-delay-maximum", "milliseconds"),
+ ("EnergyConsumption",): ("energy_consumption", "Joules"),
+ ("EnergyEfficiency",): ("energy_efficiency", "W/bps"),
+ ("CarbonEmissions",): ("carbon_emission", "gCO2eq"),
+ ("RenewableEnergyUsage",): ("renewable_energy_usage", "rate")
+ }
+
+ # Helper: safely read a nested value along a key path
+ def get_nested(d, keys):
+ for k in keys:
+ if isinstance(d, dict) and k in d:
+ d = d[k]
+ else:
+ return None
+ return d
+
+ for key_path, (metric_type, metric_unit) in metrics.items():
+ value = get_nested(profile, key_path)
+ if value is not None:
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]\
+ ["slo-sle-template"][0]["slo-policy"]["metric-bound"].append({
+ "metric-type": metric_type,
+ "metric-unit": metric_unit,
+ "bound": value
+ })
+
+
+ # Generate unique slice service ID and description
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] = f"slice-service-{uuid.uuid4()}"
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = f"Transport network slice mapped with 3GPP slice {next(iter(gpp_intent))}"
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["slo-sle-policy"]["slo-sle-template"] = ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
+
+ # Configure Source SDP
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["node-id"] = ep_transport_objects[0].split(" ", 1)[1]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[0]]["EpApplicationRef"][0]]["localAddress"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceType"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceId"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[0]]["IpAddress"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[0]]["NextHopInfo"]
+
+ # Configure Destination SDP
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["node-id"] = ep_transport_objects[1].split(" ", 1)[1]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[1]]["EpApplicationRef"][0]]["localAddress"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceType"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceId"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[1]]["IpAddress"]
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[1]]["NextHopInfo"]
+
+ # Configure Connection Group and match-criteria
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["connection-groups"]["connection-group"][0]["id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
+ ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
+
+ return ietf_i
\ No newline at end of file
diff --git a/src/network_slice_controller.py b/src/network_slice_controller.py
deleted file mode 100644
index 6ac70885c872dda4d18e919a602abd4f1a15c870..0000000000000000000000000000000000000000
--- a/src/network_slice_controller.py
+++ /dev/null
@@ -1,1259 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file includes original contributions from Telefonica Innovación Digital S.L.
-
-import json, time, os, logging, uuid, traceback, sys
-from datetime import datetime
-from src.helpers import tfs_connector, cisco_connector
-from src.Constants import DEFAULT_LOGGING_LEVEL, TFS_IP, TFS_L2VPN_SUPPORT, IXIA_IP, SRC_PATH, TEMPLATES_PATH, DUMMY_MODE, DUMP_TEMPLATES, PLANNER_ENABLED, NRP_ENABLED, UPLOAD_TYPE, NBI_L2_PATH, NBI_L3_PATH
-from src.realizers.ixia.NEII_V4 import NEII_controller
-from src.planner.planner import Planner
-
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
- level=DEFAULT_LOGGING_LEVEL,
- format='%(levelname)s - %(message)s')
-
-class NSController:
- """
- Network Slice Controller (NSC) - A class to manage network slice creation,
- modification, and deletion across different network domains.
-
- This controller handles the translation, mapping, and realization of network
- slice intents from different formats (3GPP and IETF) to network-specific
- configurations.
-
- Key Functionalities:
- - Intent Processing: Translate and process network slice intents
- - Slice Management: Create, modify, and delete network slices
- - NRP (Network Resource Partition) Mapping: Match slice requirements with available resources
- - Slice Realization: Convert intents to specific network configurations (L2VPN, L3VPN)
- """
-
- def __init__(self, controller_type = "TFS", tfs_ip=TFS_IP, ixia_ip =IXIA_IP, need_l2vpn_support=TFS_L2VPN_SUPPORT):
- """
- Initialize the Network Slice Controller.
-
- Args:
- controller_type (str): Flag to determine if configurations
- should be uploaded to Teraflow or IXIA system.
- need_l2vpn_support (bool, optional): Flag to determine if additional
- L2VPN configuration support is required. Defaults to False.
-
- Attributes:
- controller_type (str): Flag for Teraflow or Ixia upload
- answer (dict): Stores slice creation responses
- start_time (float): Tracks slice setup start time
- end_time (float): Tracks slice setup end time
- need_l2vpn_support (bool): Flag for additional L2VPN configuration support
- """
- self.controller_type = controller_type
- self.tfs_ip = tfs_ip
- self.path = ""
- self.answer = {}
- self.cool_answer = {}
- self.start_time = 0
- self.end_time = 0
- self.setup_time = 0
- self.need_l2vpn_support = need_l2vpn_support
- # Internal templates and views
- self.__gpp_template = ""
- self.__ietf_template = ""
- self.__teraflow_template = ""
- self.__nrp_view = ""
- self.subnet=""
-
- # API Methods
- def add_flow(self, intent):
- """
- Create a new transport network slice.
-
- Args:
- intent (dict): Network slice intent in 3GPP or IETF format
-
- Returns:
- Result of the Network Slice Controller (NSC) operation
-
- API Endpoint:
- POST /slice
-
- Raises:
- ValueError: If no transport network slices are found
- Exception: For unexpected errors during slice creation process
- """
- return self.nsc(intent)
-
- def get_flows(self,slice_id=None):
- """
- Retrieve transport network slice information.
-
- This method allows retrieving:
- - All transport network slices
- - A specific slice by its ID
-
- Args:
- slice_id (str, optional): Unique identifier of a specific slice.
- Defaults to None.
-
- Returns:
- dict or list:
- - If slice_id is provided: Returns the specific slice details
- - If slice_id is None: Returns a list of all slices
- - Returns an error response if no slices are found
-
- API Endpoint:
- GET /slice/{id}
-
- Raises:
- ValueError: If no transport network slices are found
- Exception: For unexpected errors during file processing
- """
- try:
- # Read slice database from JSON file
- with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'r') as file:
- content = json.load(file)
- # If specific slice ID is provided, find and return matching slice
- if slice_id:
- for slice in content:
- if slice["slice_id"] == slice_id:
- return slice
- # If no slices exist, raise an error
- if len(content) == 0:
- raise ValueError("Transport network slices not found")
-
- # Return all slices if no specific ID is given
- return [slice for slice in content if slice.get("controller") == self.controller_type]
-
- except ValueError as e:
- # Handle case where no slices are found
- return self.__send_response(False, code=404, message=str(e))
- except Exception as e:
- # Handle unexpected errors
- return self.__send_response(False, code=500, message=str(e))
-
- def modify_flow(self,slice_id, intent):
- """
- Modify an existing transport network slice.
-
- Args:
- slice_id (str): Unique identifier of the slice to modify
- intent (dict): New intent configuration for the slice
-
- Returns:
- Result of the Network Slice Controller (NSC) operation
-
- API Endpoint:
- PUT /slice/{id}
- """
- return self.nsc(intent, slice_id)
-
- def delete_flows(self, slice_id=None):
- """
- Delete transport network slice(s).
-
- This method supports:
- - Deleting a specific slice by ID
- - Deleting all slices
- - Optional cleanup of L2VPN configurations
-
- Args:
- slice_id (str, optional): Unique identifier of slice to delete.
- Defaults to None.
-
- Returns:
- dict: Response indicating successful deletion or error details
-
- API Endpoint:
- DELETE /slice/{id}
-
- Raises:
- ValueError: If no slices are found to delete
- Exception: For unexpected errors during deletion process
-
- Notes:
- - If controller_type is TFS, attempts to delete from Teraflow
- - If need_l2vpn_support is True, performs additional L2VPN cleanup
- """
- try:
- # Read current slice database
- with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'r') as file:
- content = json.load(file)
- id = None
-
- # Delete specific slice if slice_id is provided
- if slice_id:
- for i, slice in enumerate(content):
- if slice["slice_id"] == slice_id and slice.get("controller") == self.controller_type:
- del content[i]
- id = i
- break
- # Raise error if slice not found
- if id is None:
- raise ValueError("Transport network slice not found")
- # Update slice database
- with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'w') as file:
- json.dump(content, file, indent=4)
- logging.info(f"Slice {slice_id} removed successfully")
- return self.__send_response(False, code=200, status="success", message=f"Transpor network slice {slice_id} deleted successfully")
-
- # Delete all slices
- else:
- # Optional: Delete in Teraflow if configured
- if self.controller_type == "TFS":
- # TODO: should send a delete request to Teraflow
- if self.need_l2vpn_support:
- self.__tfs_l2vpn_delete()
-
- data_removed = [slice for slice in content if slice.get("controller") == self.controller_type]
-
- # Verify slices exist before deletion
- if len(data_removed) == 0:
- raise ValueError("Transport network slices not found")
-
- filtered_data = [slice for slice in content if slice.get("controller") != self.controller_type]
- # Clear slice database
- with open(os.path.join(SRC_PATH, "slice_ddbb.json"), 'w') as file:
- json.dump(filtered_data, file, indent=4)
-
- logging.info("All slices removed successfully")
- return self.__send_response(False, code=200, status="success", message="All transport network slices deleted successfully.")
-
- except ValueError as e:
- return self.__send_response(False, code=404, message=str(e))
- except Exception as e:
- return self.__send_response(False, code=500, message=str(e))
-
- # Main NSC Functionalities
- def nsc(self, intent_json, slice_id=None):
- """
- Main Network Slice Controller method to process and realize network slice intents.
-
- Workflow:
- 1. Load IETF template
- 2. Process intent (detect format, translate if needed)
- 3. Extract slice data
- 4. Store slice information
- 5. Map slice to Network Resource Pool (NRP)
- 6. Realize slice configuration
- 7. Upload to Teraflow (optional)
-
- Args:
- intent_json (dict): Network slice intent in 3GPP or IETF format
- slice_id (str, optional): Existing slice identifier for modification
-
- Returns:
- tuple: Response status and HTTP status code
-
- """
- try:
- # Start performance tracking
- self.start_time = time.perf_counter()
-
- # Reset requests and load IETF template
- self.__load_template(1, os.path.join(TEMPLATES_PATH, "ietf_template_empty.json"))
- requests = {"services":[]}
-
- # Store the received template for debugging
- if DUMP_TEMPLATES:
- with open(os.path.join(TEMPLATES_PATH, "nbi_template.json"), "w") as file:
- file.write(json.dumps(intent_json,indent=2))
-
- # Process intent (translate if 3GPP)
- ietf_intents = self.__nbi_processor(intent_json)
-
- # Store the generated template for debugging
- if DUMP_TEMPLATES:
- with open(os.path.join(TEMPLATES_PATH, "ietf_template.json"), "w") as file:
- file.write(json.dumps(ietf_intents,indent=2))
-
- if ietf_intents:
- for intent in ietf_intents:
- # Extract and store slice request details
- self.__extract_data(intent)
- self.__store_data(intent, slice_id)
- # Mapper
- self.__mapper(intent)
- # Realizer
- tfs_request = self.__realizer(intent)
- requests["services"].append(tfs_request)
- else:
- return self.__send_response(False, code=404, message="No intents found")
-
- # Store the generated template for debugging
- if DUMP_TEMPLATES:
- with open(os.path.join(TEMPLATES_PATH, "realizer_template.json"), "w") as archivo:
- archivo.write(json.dumps(requests,indent=2))
-
- # Optional: Upload template to Teraflow
- if not DUMMY_MODE:
- if self.controller_type == "TFS":
- if UPLOAD_TYPE == "WEBUI":
- response = tfs_connector().webui_post(self.tfs_ip, requests)
- elif UPLOAD_TYPE == "NBI":
- for intent in requests["services"]:
- # Send each separate NBI request
- response = tfs_connector().nbi_post(self.tfs_ip, intent, self.path)
-
- if not response.ok:
- return self.__send_response(False, code=response.status_code, message=f"Teraflow upload failed. Response: {response.text}")
-
- # For deploying an L2VPN with path selection (not supported by Teraflow)
- if self.need_l2vpn_support:
- self.__tfs_l2vpn_support(requests["services"])
-
- logging.info("Request sent to Teraflow")
- elif self.controller_type == "IXIA":
- neii_controller = NEII_controller()
- for intent in requests["services"]:
- # Send each separate IXIA request
- neii_controller.nscNEII(intent)
- logging.info("Requests sent to Ixia")
-
- # End performance tracking
- self.end_time = time.perf_counter()
- return self.__send_response(True, code=200)
-
- except ValueError as e:
- return self.__send_response(False, code=400, message=str(e))
- except Exception as e:
- return self.__send_response(False, code=500, message=str(e))
-
- def __nbi_processor(self, intent_json):
- """
- Process and translate network slice intents from different formats (3GPP or IETF).
-
- This method detects the input JSON format and converts 3GPP intents to IETF format.
- Supports multiple slice subnets in 3GPP format.
-
- Args:
- intent_json (dict): Input network slice intent in either 3GPP or IETF format.
-
- Returns:
- list: A list of IETF-formatted network slice intents.
-
- Raises:
- ValueError: If the JSON request format is not recognized.
- """
- # Detect the input JSON format (3GPP or IETF)
- format = self.__detect_format(intent_json)
- ietf_intents = []
-
- # TODO Needs to be generalized to support different names of slicesubnets
- # Process different input formats
- if format == "3GPP":
- # Translate each subnet in 3GPP format to IETF format
- for subnet in intent_json["RANSliceSubnet1"]["networkSliceSubnetRef"]:
- ietf_intents.append(self.__translator(intent_json, subnet))
- logging.info(f"3GPP requests translated to IETF template")
- elif format == "IETF":
- # If already in IETF format, add directly
- logging.info(f"IETF intent received")
- ietf_intents.append(intent_json)
- else:
- # Handle unrecognized format
- logging.error(f"JSON request format not recognized")
- raise ValueError("JSON request format not recognized")
-
- return ietf_intents
-
- def __mapper(self, ietf_intent):
- """
- Map an IETF network slice intent to the most suitable Network Resource Partition (NRP).
-
- This method:
- 1. Retrieves the current NRP view
- 2. Extracts Service Level Objectives (SLOs) from the intent
- 3. Finds NRPs that can meet the SLO requirements
- 4. Selects the best NRP based on viability and availability
- 5. Attaches the slice to the selected NRP or creates a new one
-
- Args:
- ietf_intent (dict): IETF-formatted network slice intent.
-
- Raises:
- Exception: If no suitable NRP is found and slice creation fails.
- """
- if NRP_ENABLED:
- # Retrieve NRP view
- self.__realizer(None, True, "READ")
-
- # Extract Service Level Objectives (SLOs) from the intent
- slos = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
-
- if slos:
- # Find candidate NRPs that can meet the SLO requirements
- candidates = [
- (nrp, self.__slo_viability(slos, nrp)[1])
- for nrp in self.__nrp_view
- if self.__slo_viability(slos, nrp)[0] and nrp["available"]
- ]
- logging.debug(f"Candidates: {candidates}")
-
- # Select the best NRP based on candidates
- best_nrp = max(candidates, key=lambda x: x[1])[0] if candidates else None
- logging.debug(f"Best NRP: {best_nrp}")
-
- if best_nrp:
- best_nrp["slices"].append(ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"])
- # Update NRP view
- self.__realizer(ietf_intent, True, "UPDATE")
- # TODO Here we should put how the slice is attached to an already created nrp
- else:
- # Request the controller to create a new NRP that meets the SLOs
- answer = self.__realizer(ietf_intent, True, "CREATE", best_nrp)
- if not answer:
- raise Exception("Slice rejected due to lack of NRPs")
- # TODO Here we should put how the slice is attached to the new nrp
-
- if PLANNER_ENABLED:
- optimal_path = Planner().planner(ietf_intent)
-
- logging.info(f"Optimal path: {optimal_path}")
-
- def __realizer(self, ietf_intent, need_nrp=False, order=None, nrp=None):
- """
- Manage the slice creation workflow.
-
- This method handles two primary scenarios:
- 1. Interact with network controllers for NRP (Network Resource Partition) operations when need_nrp is True
- 2. Slice service selection when need_nrp is False
-
- Args:
- ietf_intent (dict): IETF-formatted network slice intent.
- need_nrp (bool, optional): Flag to indicate if NRP operations are needed. Defaults to False.
- order (str, optional): Type of NRP operation (READ, UPDATE, CREATE). Defaults to None.
- nrp (dict, optional): Specific Network Resource Partition to operate on. Defaults to None.
- """
- if need_nrp:
- # Perform NRP-related operations
- self.__nrp(order, nrp)
- else:
- # Select slice service method
- way = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"]["tag-type"]["value"]
- way = "L3VPN"
- return self.__select_way(controller=self.controller_type, way=way, ietf_intent=ietf_intent)
-
- ### Generic functionalities
- def __load_template(self, which, dir_t):
- """
- Load and process JSON templates for different network slice formats.
-
- Args:
- which (int): Template selector (0: 3GPP, 1: IETF, other: Teraflow)
- dir_t (str): Directory path to the template file
- """
- try:
- # Open and read the template file
- with open(dir_t, 'r') as source:
- # Clean up the JSON template
- template = source.read().replace('\t', '').replace('\n', '').replace("'", '"').strip()
-
- # Store template based on selector
- if which == 0:
- self.__gpp_template = template
- elif which == 1:
- self.__ietf_template = template
- else:
- self.__teraflow_template = template
-
- except Exception as e:
- logging.error(f"Template loading error: {e}")
- return self.__send_response(False, code=500, message=f"Template loading error: {e}")
-
- def __send_response(self, result, status="error", message=None, code=None):
- """
- Generate and send a response to the 3GPP client about the slice request.
-
- Args:
- result (bool): Indicates whether the slice request was successful
- status (str, optional): Response status. Defaults to "error"
- message (str, optional): Additional error message. Defaults to None
- code (str, optional): Response code. Defaults to None
-
- Returns:
- tuple: A tuple containing the response dictionary and status code
- """
- if result:
- # Successful slice creation
- logging.info("Your slice request was fulfilled sucessfully")
- self.setup_time = (self.end_time - self.start_time)*1000
- logging.info(f"Setup time: {self.setup_time:.2f}")
-
- # Construct detailed successful response
- answer = {
- "status": "success",
- "code": code,
- "slices": [],
- "setup_time": self.setup_time
- }
- # Add slice details to the response
- for subnet in self.answer:
- slice_info = {
- "id": subnet,
- "source": self.answer[subnet]["Source"],
- "destination": self.answer[subnet]["Destination"],
- "vlan": self.answer[subnet]["VLAN"],
- "requirements": self.answer[subnet]["QoS Requirements"],
- }
- answer["slices"].append(slice_info)
- self.cool_answer = answer
- else:
- # Failed slice creation
- logging.info("Your request cannot be fulfilled. Reason: "+message)
- self.cool_answer = {
- "status" :status,
- "code": code,
- "message": message
- }
- return self.cool_answer, code
-
- def __extract_data(self, intent_json):
- """
- Extract source and destination IP addresses from the IETF intent.
-
- Args:
- intent_json (dict): IETF-formatted network slice intent
- """
- # Extract source and destination IP addresses
- source = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"]
- destination = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"]
-
- logging.info(f"Intent generated between {source} and {destination}")
-
- # Store slice and connection details
- self.subnet = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
- self.subnet = intent_json["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
- self.answer[self.subnet] = {
- "Source": source,
- "Destination": destination
- }
-
- def __store_data(self, intent, slice_id):
- """
- Store network slice intent information in a JSON database file.
-
- This method:
- 1. Creates a JSON file if it doesn't exist
- 2. Reads existing content
- 3. Updates or adds new slice intent information
-
- Args:
- intent (dict): Network slice intent to be stored
- slice_id (str, optional): Existing slice ID to update. Defaults to None.
- """
- file_path = os.path.join(SRC_PATH, "slice_ddbb.json")
- # Create initial JSON file if it doesn't exist
- if not os.path.exists(file_path):
- with open(file_path, 'w') as file:
- json.dump([], file, indent=4)
-
- # Read existing content
- with open(file_path, 'r') as file:
- content = json.load(file)
-
- # Update or add new slice intent
- if slice_id:
- # Update existing slice intent
- for slice in content:
- if slice["slice_id"] == slice_id:
- slice["intent"] = intent
- else:
- # Add new slice intent
- content.append(
- {
- "slice_id": intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"],
- "intent": intent,
- "controller": self.controller_type,
- })
-
- # # Write updated content back to file
- with open(file_path, 'w') as file:
- json.dump(content, file, indent=4)
-
- ### NBI processor functionalities
- def __detect_format(self,json_data):
- """
- Detect the format of the input network slice intent.
-
- This method identifies whether the input JSON is in 3GPP or IETF format
- by checking for specific keys in the JSON structure.
-
- Args:
- json_data (dict): Input network slice intent JSON
-
- Returns:
- str or None:
- - "IETF" if IETF-specific keys are found
- - "3GPP" if 3GPP-specific keys are found
- - None if no recognizable format is detected
- """
- # Check for IETF-specific key
- if "ietf-network-slice-service:network-slice-services" in json_data:
- return "IETF"
- # Check for 3GPP-specific keys
- if any(key in json_data for key in ["NetworkSlice1", "TopSliceSubnet1", "CNSliceSubnet1", "RANSliceSubnet1"]):
- return "3GPP"
-
- return None
-
- def __translator(self, gpp_intent, subnet):
- """
- Translate a 3GPP network slice intent to IETF format.
-
- This method converts a 3GPP intent into a standardized IETF intent template,
- mapping key parameters such as QoS profiles, service endpoints, and connection details.
-
- Args:
- gpp_intent (dict): Original 3GPP network slice intent
- subnet (str): Specific subnet reference within the 3GPP intent
-
- Returns:
- dict: Translated IETF-formatted network slice intent
-
- Notes:
- - Generates a unique slice service ID using UUID
- - Maps QoS requirements, source/destination endpoints
- - Logs the translated intent to a JSON file for reference
- """
- # Load IETF template and create a copy to modify
- ietf_i = json.loads(str(self.__ietf_template))
-
- # Extract endpoint transport objects
- ep_transport_objects = gpp_intent[subnet]["EpTransport"]
-
- # Populate template with SLOs (currently supporting QoS profile, latency and bandwidth)
- ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = gpp_intent[ep_transport_objects[0]]["qosProfile"]
-
- profile = gpp_intent.get(subnet, {}).get("SliceProfileList", [{}])[0].get("RANSliceSubnetProfile", {})
-
-
- metrics = {
- ("uLThptPerSliceSubnet", "MaxThpt"): ("one-way-bandwidth", "kbps"),
- ("uLLatency",): ("one-way-delay-maximum", "milliseconds"),
- ("EnergyConsumption",): ("energy_consumption", "Joules"),
- ("EnergyEfficiency",): ("energy_efficiency", "W/bps"),
- ("CarbonEmissions",): ("carbon_emission", "gCO2eq"),
- ("RenewableEnergyUsage",): ("renewable_energy_usage", "rate")
- }
-
- # Aux
- def get_nested(d, keys):
- for k in keys:
- if isinstance(d, dict) and k in d:
- d = d[k]
- else:
- return None
- return d
-
- for key_path, (metric_type, metric_unit) in metrics.items():
- value = get_nested(profile, key_path)
- if value is not None:
- ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]\
- ["slo-sle-template"][0]["slo-policy"]["metric-bound"].append({
- "metric-type": metric_type,
- "metric-unit": metric_unit,
- "bound": value
- })
-
-
- # Generate unique slice service ID and description
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] = f"slice-service-{uuid.uuid4()}"
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = f"Transport network slice mapped with 3GPP slice {next(iter(gpp_intent))}"
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["slo-sle-policy"]["slo-sle-template"] = ietf_i["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
-
- # Configure Source SDP
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["node-id"] = ep_transport_objects[0].split(" ", 1)[1]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[0]]["EpApplicationRef"][0]]["localAddress"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceType"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[0]]["logicalInterfaceInfo"]["logicalInterfaceId"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[0]]["IpAddress"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[0]]["NextHopInfo"]
-
- # Configure Destination SDP
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["node-id"] = ep_transport_objects[1].split(" ", 1)[1]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["sdp-ip-address"] = gpp_intent[gpp_intent[ep_transport_objects[1]]["EpApplicationRef"][0]]["localAddress"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["match-type"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceType"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["value"] = gpp_intent[ep_transport_objects[1]]["logicalInterfaceInfo"]["logicalInterfaceId"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"] = gpp_intent[ep_transport_objects[1]]["IpAddress"]
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"] = gpp_intent[ep_transport_objects[1]]["NextHopInfo"]
-
- # Configure Connection Group and match-criteria
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["connection-groups"]["connection-group"][0]["id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
- ietf_i["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["service-match-criteria"]["match-criterion"][0]["target-connection-group-id"] = f"{ep_transport_objects[0].split(' ', 1)[1]}_{ep_transport_objects[1].split(' ', 1)[1]}"
-
- return ietf_i
-
- ### Mapper functionalities
- def __slo_viability(self, slice_slos, nrp_slos):
- """
- Compare Service Level Objectives (SLOs) between a slice and a Network Resource Partition (NRP).
-
- This method assesses whether an NRP can satisfy the SLOs of a network slice.
-
- Args:
- slice_slos (list): Service Level Objectives of the slice
- nrp_slos (dict): Service Level Objectives of the Network Resource Pool
-
- Returns:
- tuple: A boolean indicating viability and a flexibility score
- - First value: True if NRP meets SLOs, False otherwise
- - Second value: A score representing how well the NRP meets the SLOs
- """
- # Define SLO types for maximum and minimum constraints
- slo_type = {
- "max": ["one-way-delay-maximum", "two-way-delay-maximum", "one-way-delay-percentile", "two-way-delay-percentile",
- "one-way-delay-variation-maximum", "two-way-delay-variation-maximum",
- "one-way-delay-variation-percentile", "two-way-delay-variation-percentile",
- "one-way-packet-loss", "two-way-packet-loss"],
- "min": ["one-way-bandwidth", "two-way-bandwidth", "shared-bandwidth"]
- }
- flexibility_scores = []
- for slo in slice_slos:
- for nrp_slo in nrp_slos['slos']:
- if slo["metric-type"] == nrp_slo["metric-type"]:
- # Handle maximum type SLOs
- if slo["metric-type"] in slo_type["max"]:
- flexibility = (nrp_slo["bound"] - slo["bound"]) / slo["bound"]
- if slo["bound"] > nrp_slo["bound"]:
- return False, 0 # Does not meet maximum constraint
- # Handle minimum type SLOs
- if slo["metric-type"] in slo_type["min"]:
- flexibility = (slo["bound"] - nrp_slo["bound"]) / slo["bound"]
- if slo["bound"] < nrp_slo["bound"]:
- return False, 0 # Does not meet minimum constraint
- flexibility_scores.append(flexibility)
- break # Exit inner loop after finding matching metric
-
- # Calculate final viability score
- score = sum(flexibility_scores) / len(flexibility_scores) if flexibility_scores else 0
- return True, score # If all checks pass, the NRP is viable
-
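As a worked illustration of the flexibility score that __slo_viability computes above (the SLO bounds below are made-up values, not taken from any repository data): for a max-type metric the margin is (nrp_bound - slice_bound) / slice_bound, for a min-type metric it is (slice_bound - nrp_bound) / slice_bound, and the returned score is the mean of the margins over all matched metrics.

# Worked example of the score returned by __slo_viability; bounds are illustrative.
slice_slos = [{"metric-type": "one-way-delay-maximum", "bound": 10},
              {"metric-type": "one-way-bandwidth", "bound": 1000}]
nrp_bounds = {"one-way-delay-maximum": 12, "one-way-bandwidth": 800}

margins = []
for slo in slice_slos:
    nrp_bound = nrp_bounds[slo["metric-type"]]
    if slo["metric-type"] == "one-way-delay-maximum":              # max-type metric
        margins.append((nrp_bound - slo["bound"]) / slo["bound"])  # (12 - 10) / 10 = 0.2
    else:                                                          # min-type metric (bandwidth)
        margins.append((slo["bound"] - nrp_bound) / slo["bound"])  # (1000 - 800) / 1000 = 0.2
score = sum(margins) / len(margins)                                # 0.2, so the check yields (True, 0.2)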
- ### Realizer functionalities.
- def __nrp(self, request, nrp):
- """
- Manage Network Resource Partition (NRP) operations.
-
- This method handles CRUD operations for Network Resource Partitions,
- interacting with Network Controllers (currently done statically via a JSON-based database file).
-
- Args:
- request (str): The type of operation to perform.
- Supported values:
- - "CREATE": Add a new NRP to the database
- - "READ": Retrieve the current NRP view
- - "UPDATE": Update an existing NRP (currently a placeholder)
-
- nrp (dict): The Network Resource Partition details to create or update.
-
- Returns:
- None or answer:
- - For "CREATE": Returns the response from the controller (currently using a static JSON)
- - For "READ": Gets the NRP view from the controller (currently using a static JSON)
- - For "UPDATE": Placeholder for update functionality
-
- Notes:
- - Uses a local JSON file "nrp_ddbb.json" to store NRP information as controller operation is not yet defined
- """
- if request == "CREATE":
- # TODO: Implement actual request to Controller to create an NRP
- logging.debug("Creating NRP")
-
- # Load existing NRP database
- with open(os.path.join(SRC_PATH, "nrp_ddbb.json"), "r") as archivo:
- self.__nrp_view = json.load(archivo)
-
- # Append new NRP to the view
- self.__nrp_view.append(nrp)
-
- # Placeholder for controller POST request
- answer = None
- return answer
- elif request == "READ":
- # TODO: Request to Controller to get topology and current NRP view
- logging.debug("Reading Topology")
-
- # Load NRP database
- with open(os.path.join(SRC_PATH, "nrp_ddbb.json"), "r") as archivo:
- self.__nrp_view = json.load(archivo)
-
- elif request == "UPDATE":
- # TODO: Implement request to Controller to update NRP
- logging.debug("Updating NRP")
- answer = ""
-
- def __select_way(self, controller=None, way=None, ietf_intent=None):
- """
- Determine the method of slice realization.
-
- Args:
- controller (str): The controller to use for slice realization.
- Supported values:
- - "IXIA": IXIA NEII for network testing
- - "TFS": TeraFlow Service for network slice management
- way (str): The type of technology to use.
- Supported values:
- - "L2VPN": Layer 2 Virtual Private Network
- - "L3VPN": Layer 3 Virtual Private Network
-
- ietf_intent (dict): IETF-formatted network slice intent.
-
- Returns:
- dict: A realization request for the specified network slice type.
-
- """
- realizing_request = None
- if controller == "TFS":
- if way == "L2VPN":
- realizing_request = self.__tfs_l2vpn(ietf_intent)
- elif way == "L3VPN":
- realizing_request = self.__tfs_l3vpn(ietf_intent)
- else:
- logging.warning(f"Unsupported way: {way}. Defaulting to L2VPN realization.")
- realizing_request = self.__tfs_l2vpn(ietf_intent)
- elif controller == "IXIA":
- realizing_request = self.__ixia(ietf_intent)
- else:
- logging.warning(f"Unsupported controller: {controller}. Defaulting to TFS L2VPN realization.")
- realizing_request = self.__tfs_l2vpn(ietf_intent)
- return realizing_request
-
- def __tfs_l2vpn(self, ietf_intent):
- """
- Translate slice intent into a TeraFlow service request.
-
- This method prepares a L2VPN service request by:
- 1. Defining endpoint routers
- 2. Loading a service template
- 3. Generating a unique service UUID
- 4. Configuring service endpoints
- 5. Adding QoS constraints
- 6. Preparing configuration rules for network interfaces
-
- Args:
- ietf_intent (dict): IETF-formatted network slice intent.
-
- Returns:
- dict: A TeraFlow service request for L2VPN configuration.
-
- """
- # Hardcoded router endpoints
- # TODO (should be dynamically determined)
- origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
- origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
- destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
- destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-
- # Extract QoS Profile from intent
- QoSProfile = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
- vlan_value = 0
-
- self.answer[self.subnet]["QoS Requirements"] = []
-
- # Populate response with QoS requirements and VLAN from intent
- slo_policy = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]
-
- # Process metrics
- for metric in slo_policy.get("metric-bound", []):
- constraint_type = f"{metric['metric-type']}[{metric['metric-unit']}]"
- constraint_value = str(metric["bound"])
- self.answer[self.subnet]["QoS Requirements"].append({
- "constraint_type": constraint_type,
- "constraint_value": constraint_value
- })
-
- # Availability
- if "availability" in slo_policy:
- self.answer[self.subnet]["QoS Requirements"].append({
- "constraint_type": "availability[%]",
- "constraint_value": str(slo_policy["availability"])
- })
-
- # MTU
- if "mtu" in slo_policy:
- self.answer[self.subnet]["QoS Requirements"].append({
- "constraint_type": "mtu[bytes]",
- "constraint_value": str(slo_policy["mtu"])
- })
-
- # VLAN
- vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
- self.answer[self.subnet]["VLAN"] = vlan_value
-
- if UPLOAD_TYPE == "WEBUI":
- # Load L2VPN service template
- self.__load_template(2, os.path.join(TEMPLATES_PATH, "L2-VPN_template_empty.json"))
- tfs_request = json.loads(str(self.__teraflow_template))["services"][0]
-
- # Generate unique service UUID
- tfs_request["service_id"]["service_uuid"]["uuid"] += "-" + str(int(datetime.now().timestamp() * 1e7))
-
- # Configure service endpoints
- for endpoint in tfs_request["service_endpoint_ids"]:
- endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
- endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
-
- # Add service constraints
- for constraint in self.answer[self.subnet]["QoS Requirements"]:
- tfs_request["service_constraints"].append({"custom": constraint})
-
- # Add configuration rules
- for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
- router_id = origin_router_id if i == 1 else destination_router_id
- router_if = origin_router_if if i == 1 else destination_router_if
- resource_value = config_rule["custom"]["resource_value"]
-
- sdp_index = i - 1
- vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
- if vlan_value:
- resource_value["vlan_id"] = int(vlan_value)
- resource_value["circuit_id"] = vlan_value
- resource_value["remote_router"] = destination_router_id if i == 1 else origin_router_id
- resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
- config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
-
- elif UPLOAD_TYPE == "NBI":
- self.path = NBI_L2_PATH
- # Load IETF L2VPN service template
- self.__load_template(2, os.path.join(TEMPLATES_PATH, "ietfL2VPN_template_empty.json"))
- tfs_request = json.loads(str(self.__teraflow_template))
-
- # Generate service UUID
- full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
- uuid_only = full_id.split("slice-service-")[-1]
- tfs_request["ietf-l2vpn-svc:vpn-service"][0]["vpn-id"] = uuid_only
-
- # Configure service endpoints
- sites = tfs_request["ietf-l2vpn-svc:vpn-service"][0]["site"]
- sdps = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"]
-
- for i, site in enumerate(sites):
- is_origin = (i == 0)
- router_id = origin_router_id if is_origin else destination_router_id
- sdp = sdps[0] if is_origin else sdps[1]
- site["site-id"] = router_id
- site["site-location"] = sdp["node-id"]
- site["site-network-access"]["interface"]["ip-address"] = sdp["sdp-ip-address"]
-
- logging.info(f"L2VPN Intent realized\n")
- return tfs_request
-
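A compact illustration of the metric-bound to service-constraint mapping performed by __tfs_l2vpn above; the slo-policy values are illustrative.

# Illustrative slo-policy and the constraint list derived from it, mirroring the loop above.
slo_policy = {
    "metric-bound": [
        {"metric-type": "one-way-bandwidth", "metric-unit": "kbps", "bound": 2000},
        {"metric-type": "one-way-delay-maximum", "metric-unit": "milliseconds", "bound": 10},
    ],
    "availability": 99.9,
    "mtu": 1500,
}
constraints = [
    {"constraint_type": f"{m['metric-type']}[{m['metric-unit']}]", "constraint_value": str(m["bound"])}
    for m in slo_policy.get("metric-bound", [])
]
if "availability" in slo_policy:
    constraints.append({"constraint_type": "availability[%]", "constraint_value": str(slo_policy["availability"])})
if "mtu" in slo_policy:
    constraints.append({"constraint_type": "mtu[bytes]", "constraint_value": str(slo_policy["mtu"])})
# constraints -> one-way-bandwidth[kbps]=2000, one-way-delay-maximum[milliseconds]=10,
#                availability[%]=99.9, mtu[bytes]=1500; each is later wrapped as {"custom": constraint}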
- def __tfs_l2vpn_support(self, requests):
- """
- Configuration support for L2VPN with path selection based on MPLS traffic-engineering tunnels
-
- Args:
- requests (list): A list of configuration parameters.
-
- """
- sources={
- "source": "10.60.125.44",
- "config":[]
- }
- destinations={
- "destination": "10.60.125.45",
- "config":[]
- }
- for request in requests:
- # Configure Source Endpoint
- temp_source = request["service_config"]["config_rules"][1]["custom"]["resource_value"]
- endpoints = request["service_endpoint_ids"]
- config = {
- "ni_name": temp_source["ni_name"],
- "remote_router": temp_source["remote_router"],
- "interface": endpoints[0]["endpoint_uuid"]["uuid"].replace("0/0/0-", ""),
- "vlan" : temp_source["vlan_id"],
- "number" : temp_source["vlan_id"] % 10 + 1
- }
- sources["config"].append(config)
-
- # Configure Destination Endpoint
- temp_destiny = request["service_config"]["config_rules"][2]["custom"]["resource_value"]
- config = {
- "ni_name": temp_destiny["ni_name"],
- "remote_router": temp_destiny["remote_router"],
- "interface": endpoints[1]["endpoint_uuid"]["uuid"].replace("0/0/3-", ""),
- "vlan" : temp_destiny["vlan_id"],
- "number" : temp_destiny["vlan_id"] % 10 + 1
- }
- destinations["config"].append(config)
-
- #cisco_source = cisco_connector(source_address, ni_name, remote_router, vlan, vlan % 10 + 1)
- cisco_source = cisco_connector(sources["source"], sources["config"])
- commands = cisco_source.full_create_command_template()
- cisco_source.execute_commands(commands)
-
- #cisco_destiny = cisco_connector(destination_address, ni_name, remote_router, vlan, vlan % 10 + 1)
- cisco_destiny = cisco_connector(destinations["destination"], destinations["config"])
- commands = cisco_destiny.full_create_command_template()
- cisco_destiny.execute_commands(commands)
-
- def __tfs_l2vpn_delete(self):
- """
- Delete L2VPN configurations from Cisco devices.
-
- This method removes L2VPN configurations from Cisco routers
-
- Notes:
- - Uses cisco_connector to generate and execute deletion commands
- - Clears Network Interface (NI) settings
- """
- # Delete Source Endpoint Configuration
- source_address = "10.60.125.44"
- cisco_source = cisco_connector(source_address)
- cisco_source.execute_commands(cisco_source.create_command_template_delete())
-
- # Delete Destination Endpoint Configuration
- destination_address = "10.60.125.45"
- cisco_destiny = cisco_connector(destination_address)
- cisco_destiny.execute_commands(cisco_destiny.create_command_template_delete())
-
- def __tfs_l3vpn(self, ietf_intent):
- """
- Translate L3VPN (Layer 3 Virtual Private Network) intent into a TeraFlow service request.
-
- Similar to __tfs_l2vpn, but configured for Layer 3 VPN:
- 1. Defines endpoint routers
- 2. Loads service template
- 3. Generates unique service UUID
- 4. Configures service endpoints
- 5. Adds QoS constraints
- 6. Prepares configuration rules for network interfaces
-
- Args:
- ietf_intent (dict): IETF-formatted network slice intent.
-
- Returns:
- dict: A TeraFlow service request for L3VPN configuration.
- """
- # Hardcoded router endpoints
- # TODO (should be dynamically determined)
- origin_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
- origin_router_if = '0/0/0-GigabitEthernet0/0/0/0'
- destination_router_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][1]["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"]
- destination_router_if = '0/0/0-GigabitEthernet0/0/0/0'
-
- # Extract QoS Profile from intent
- QoSProfile = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"]
- vlan_value = 0
-
- self.answer[self.subnet]["QoS Requirements"] = []
-
- # Populate response with QoS requirements and VLAN from intent
- slo_policy = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]
-
- # Process metrics
- for metric in slo_policy.get("metric-bound", []):
- constraint_type = f"{metric['metric-type']}[{metric['metric-unit']}]"
- constraint_value = str(metric["bound"])
- self.answer[self.subnet]["QoS Requirements"].append({
- "constraint_type": constraint_type,
- "constraint_value": constraint_value
- })
-
- # Availability
- if "availability" in slo_policy:
- self.answer[self.subnet]["QoS Requirements"].append({
- "constraint_type": "availability[%]",
- "constraint_value": str(slo_policy["availability"])
- })
-
- # MTU
- if "mtu" in slo_policy:
- self.answer[self.subnet]["QoS Requirements"].append({
- "constraint_type": "mtu[bytes]",
- "constraint_value": str(slo_policy["mtu"])
- })
-
- # VLAN
- vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
- self.answer[self.subnet]["VLAN"] = vlan_value
-
- if UPLOAD_TYPE == "WEBUI":
- # Load L3VPN service template
- self.__load_template(2, os.path.join(TEMPLATES_PATH, "L3-VPN_template_empty.json"))
- tfs_request = json.loads(str(self.__teraflow_template))["services"][0]
-
- # Generate unique service UUID
- tfs_request["service_id"]["service_uuid"]["uuid"] += "-" + str(int(datetime.now().timestamp() * 1e7))
-
- # Configure service endpoints
- for endpoint in tfs_request["service_endpoint_ids"]:
- endpoint["device_id"]["device_uuid"]["uuid"] = origin_router_id if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_id
- endpoint["endpoint_uuid"]["uuid"] = origin_router_if if endpoint is tfs_request["service_endpoint_ids"][0] else destination_router_if
-
- # Add service constraints
- for constraint in self.answer[self.subnet]["QoS Requirements"]:
- tfs_request["service_constraints"].append({"custom": constraint})
-
- # Add configuration rules
- for i, config_rule in enumerate(tfs_request["service_config"]["config_rules"][1:], start=1):
- router_id = origin_router_id if i == 1 else destination_router_id
- router_if = origin_router_if if i == 1 else destination_router_if
- resource_value = config_rule["custom"]["resource_value"]
-
- sdp_index = i - 1
- vlan_value = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["service-match-criteria"]["match-criterion"][0]["value"]
- resource_value["router_id"] = destination_router_id if i == 1 else origin_router_id
- resource_value["vlan_id"] = int(vlan_value)
- resource_value["address_ip"] = destination_router_id if i == 1 else origin_router_id
- resource_value["policy_AZ"] = "policyA"
- resource_value["policy_ZA"] = "policyB"
- resource_value["ni_name"] = 'ELAN{:s}'.format(str(vlan_value))
- config_rule["custom"]["resource_key"] = f"/device[{router_id}]/endpoint[{router_if}]/settings"
-
- elif UPLOAD_TYPE == "NBI":
- self.path = NBI_L3_PATH
- # Load IETF L3VPN service template
- self.__load_template(2, os.path.join(TEMPLATES_PATH, "ietfL3VPN_template_empty.json"))
- tfs_request = json.loads(str(self.__teraflow_template))
-
- # Generate service UUID
- full_id = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
- tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["vpn-services"]["vpn-service"][0]["vpn-id"] = full_id
- # Configure service endpoints
- for i, site in enumerate(tfs_request["ietf-l3vpn-svc:l3vpn-svc"]["sites"]["site"]):
-
- # Determine if origin or destination
- is_origin = (i == 0)
- sdp_index = 0 if is_origin else 1
- location = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][sdp_index]["node-id"]
- router_id = origin_router_id if is_origin else destination_router_id
- router_if = origin_router_if if is_origin else destination_router_if
-
- # Assign common values
- site["site-id"] = f"site_{location}"
- site["locations"]["location"][0]["location-id"] = location
- site["devices"]["device"][0]["device-id"] = router_id
- site["devices"]["device"][0]["location"] = location
-
- access = site["site-network-accesses"]["site-network-access"][0]
- access["site-network-access-id"] = router_if
- access["device-reference"] = router_id
- access["vpn-attachment"]["vpn-id"] = full_id
-
- # Apply QoS constraints
- for constraint in self.answer[self.subnet]["QoS Requirements"]:
- ctype = constraint["constraint_type"]
- cvalue = float(constraint["constraint_value"])
- if constraint["constraint_type"].startswith("one-way-bandwidth"):
- unit = constraint["constraint_type"].split("[")[-1].rstrip("]")
- multiplier = {"bps": 1, "kbps": 1_000, "Mbps": 1_000_000, "Gbps": 1_000_000_000}.get(unit, 1)
- value = int(cvalue * multiplier)
- access["service"]["svc-input-bandwidth"] = value
- access["service"]["svc-output-bandwidth"] = value
- elif ctype == "one-way-delay-maximum[milliseconds]":
- access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["latency"]["latency-boundary"] = int(cvalue)
- elif ctype == "availability[%]":
- access["service"]["qos"]["qos-profile"]["classes"]["class"][0]["bandwidth"]["guaranteed-bw-percent"] = int(cvalue)
- elif ctype == "mtu[bytes]":
- access["service"]["svc-mtu"] = int(cvalue)
-
-
- logging.info(f"L3VPN Intent realized\n")
- self.answer[self.subnet]["VLAN"] = vlan_value
- return tfs_request
-
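The NBI branch of __tfs_l3vpn above normalizes bandwidth constraints to bits per second before filling svc-input-bandwidth / svc-output-bandwidth; a short worked example with an illustrative value:

# "one-way-bandwidth[kbps]" with constraint_value "2000" (illustrative):
unit = "one-way-bandwidth[kbps]".split("[")[-1].rstrip("]")                         # "kbps"
multiplier = {"bps": 1, "kbps": 1_000, "Mbps": 1_000_000, "Gbps": 1_000_000_000}[unit]
value = int(float("2000") * multiplier)                                             # 2_000_000 bps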
- def __ixia(self, ietf_intent):
- """
- Prepare an Ixia service request based on the IETF intent.
-
- This method configures an Ixia service request by:
- 1. Defining endpoint routers
- 2. Loading a service template
- 3. Generating a unique service UUID
- 4. Configuring service endpoints
- 5. Adding QoS constraints
-
- Args:
- ietf_intent (dict): IETF-formatted network slice intent.
-
- Returns:
- dict: An Ixia service request for configuration.
- """
- self.answer[self.subnet]["QoS Requirements"] = []
- # Add service constraints
- for i, constraint in enumerate(ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]):
- bound = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["bound"]
- metric_type = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["metric-type"]
- metric_unit = ietf_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"][i]["metric-unit"]
- service_constraint ={
- "custom": {
- "constraint_type": f"{metric_type}[{metric_unit}]",
- "constraint_value": f"{bound}"
- }
- }
- self.answer[self.subnet]["QoS Requirements"].append(service_constraint["custom"])
- self.answer[self.subnet]["VLAN"] = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["sdps"]["sdp"][0]["service-match-criteria"]["match-criterion"][0]["value"]
- # Safely extract the list of metric bounds
- metric_bounds = ietf_intent.get("ietf-network-slice-service:network-slice-services", {}) \
- .get("slo-sle-templates", {}) \
- .get("slo-sle-template", [{}])[0] \
- .get("slo-policy", {}) \
- .get("metric-bound", [])
-
- # Initialize values
- bandwidth = None
- latency = None
- tolerance = None
-
- # Assign values according to the metric type
- for metric in metric_bounds:
- metric_type = metric.get("metric-type")
- bound = metric.get("bound")
-
- if metric_type == "one-way-bandwidth":
- bandwidth = bound
- elif metric_type == "one-way-delay-maximum":
- latency = bound
- elif metric_type == "one-way-delay-variation-maximum":
- tolerance = bound
-
- # Build the intent dictionary
- intent = {
- "src_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
- .get("slice-service", [{}])[0]
- .get("sdps", {}).get("sdp", [{}])[0]
- .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
- .get("sdp-peering", {}).get("peer-sap-id"),
-
- "dst_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
- .get("slice-service", [{}])[0]
- .get("sdps", {}).get("sdp", [{}, {}])[1]
- .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
- .get("sdp-peering", {}).get("peer-sap-id"),
-
- "vlan_id": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
- .get("slice-service", [{}])[0]
- .get("sdps", {}).get("sdp", [{}])[0]
- .get("service-match-criteria", {}).get("match-criterion", [{}])[0]
- .get("value"),
-
- "bandwidth": bandwidth,
- "latency": latency,
- "tolerance": tolerance,
-
- "latency_version": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
- .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
- .get("description"),
-
- "reliability": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
- .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
- .get("sle-policy", {}).get("reliability"),
- }
-
- logging.info(f"IXIA Intent realized\n")
- return intent
-
diff --git a/src/planner/planner.py b/src/planner/planner.py
index b5fb1ba1ee624fcc090d4c85dfc252f2463ac042..c2613bd9c7644dd9a91cda04c790156385da9ab2 100644
--- a/src/planner/planner.py
+++ b/src/planner/planner.py
@@ -15,12 +15,8 @@
# This file is an original contribution from Telefonica Innovación Digital S.L.
import logging, random, os, json, heapq
-from src.Constants import SRC_PATH, PCE_EXTERNAL, DEFAULT_LOGGING_LEVEL
-
-# Configure logging to provide clear and informative log messages
-logging.basicConfig(
- level=DEFAULT_LOGGING_LEVEL,
- format='%(levelname)s - %(message)s')
+from src.config.constants import SRC_PATH
+from flask import current_app
class Planner:
"""
@@ -37,8 +33,8 @@ class Planner:
destination = intent.get("ietf-network-slice-service:network-slice-services", {}).get("slice-service", [])[0].get("sdps", {}).get("sdp", [])[1].get("id") or "B"
optimal_path = []
# If using an external PCE
- if PCE_EXTERNAL:
- logging.info("Using external PCE for path planning")
+ if current_app.config["PCE_EXTERNAL"]:
+ logging.debug("Using external PCE for path planning")
def build_slice_input(node_source, node_destination):
return {
"clientName": "demo-client",
@@ -121,9 +117,9 @@ class Planner:
optimal_path.append(next((node for node in topology["nodes"] if node["nodeId"] == hop['nodeId']), None)["name"])
else:
- logging.info("Using internal PCE for path planning")
+ logging.debug("Using internal PCE for path planning")
ietf_dlos = intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"]
- logging.info(ietf_dlos),
+ logging.debug(ietf_dlos)
# Only assign the DLOs that exist; set the rest to None
dlos = {
"EC": next((item.get("bound") for item in ietf_dlos if item.get("metric-type") == "energy_consumption"), None),
@@ -148,7 +144,7 @@ class Planner:
return energy_metrics
def __retrieve_topology(self):
- if PCE_EXTERNAL:
+ if current_app.config["PCE_EXTERNAL"]:
# TODO : Implement the logic to retrieve topology data from external PCE
# GET /sss/v1/topology/node and /sss/v1/topology/link
with open(os.path.join(SRC_PATH, "planner/ext_topo_ddbb.json"), "r") as archivo:
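Because the planner now reads PCE_EXTERNAL from current_app.config instead of a module constant, standalone use (unit tests, scripts) needs a Flask application context. A minimal sketch, assuming a bare Flask app rather than the project's configuration loader:

from flask import Flask, current_app

app = Flask(__name__)
app.config["PCE_EXTERNAL"] = False           # illustrative; normally populated by the configuration loader

with app.app_context():                      # current_app only resolves inside an application context
    if current_app.config["PCE_EXTERNAL"]:
        print("external PCE path")
    else:
        print("internal PCE path")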
diff --git a/src/realizers/ixia/NEII_V4.py b/src/realizer/ixia/helpers/NEII_V4.py
similarity index 99%
rename from src/realizers/ixia/NEII_V4.py
rename to src/realizer/ixia/helpers/NEII_V4.py
index f9379d2cc0ddb0aceecb38ad918e0a995b0cebfe..e9bf61a24d0a6b42f6d0179a4d9a92640ab679ec 100644
--- a/src/realizers/ixia/NEII_V4.py
+++ b/src/realizer/ixia/helpers/NEII_V4.py
@@ -16,13 +16,12 @@
from .automatizacion_ne2v4 import automatizacion
import ipaddress, logging
-from src.Constants import IXIA_IP
class NEII_controller:
- def __init__(self, ixia_ip=IXIA_IP):
+ def __init__(self, ixia_ip):
self.ixia_ip = ixia_ip
- def menu_principal(self, ip=IXIA_IP):
+ def menu_principal(self, ip):
'''
Inputs:
Outputs:
diff --git a/src/realizers/ixia/automatizacion_ne2v4.py b/src/realizer/ixia/helpers/automatizacion_ne2v4.py
similarity index 100%
rename from src/realizers/ixia/automatizacion_ne2v4.py
rename to src/realizer/ixia/helpers/automatizacion_ne2v4.py
diff --git a/src/realizer/ixia/ixia_connect.py b/src/realizer/ixia/ixia_connect.py
new file mode 100644
index 0000000000000000000000000000000000000000..c001fc31b3f62b73ede70218b742ddb1ff82c0e4
--- /dev/null
+++ b/src/realizer/ixia/ixia_connect.py
@@ -0,0 +1,9 @@
+from .helpers.NEII_V4 import NEII_controller
+
+def ixia_connect(requests, ixia_ip):
+ response = None
+ neii_controller = NEII_controller(ixia_ip)
+ for intent in requests["services"]:
+ # Send each separate IXIA request
+ response = neii_controller.nscNEII(intent)
+ return response
\ No newline at end of file
diff --git a/src/realizer/ixia/main.py b/src/realizer/ixia/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..8935396eb29974fdad866d62a9b6fde044f3a8f7
--- /dev/null
+++ b/src/realizer/ixia/main.py
@@ -0,0 +1,77 @@
+import logging
+
+def ixia(ietf_intent):
+ """
+ Prepare an Ixia service request based on the IETF intent.
+
+ This function extracts the endpoint addresses, VLAN and SLO/SLE
+ parameters from the intent and builds the flat dictionary expected
+ by the Ixia NEII controller.
+
+ Args:
+ ietf_intent (dict): IETF-formatted network slice intent.
+
+ Returns:
+ dict: An Ixia service request for configuration.
+ """
+ metric_bounds = ietf_intent.get("ietf-network-slice-service:network-slice-services", {}) \
+ .get("slo-sle-templates", {}) \
+ .get("slo-sle-template", [{}])[0] \
+ .get("slo-policy", {}) \
+ .get("metric-bound", [])
+
+ # Initialize values
+ bandwidth = None
+ latency = None
+ tolerance = None
+
+ # Assign values according to the metric type
+ for metric in metric_bounds:
+ metric_type = metric.get("metric-type")
+ bound = metric.get("bound")
+
+ if metric_type == "one-way-bandwidth":
+ bandwidth = bound
+ elif metric_type == "one-way-delay-maximum":
+ latency = bound
+ elif metric_type == "one-way-delay-variation-maximum":
+ tolerance = bound
+
+ # Build the intent dictionary
+ intent = {
+ "src_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+ .get("slice-service", [{}])[0]
+ .get("sdps", {}).get("sdp", [{}])[0]
+ .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
+ .get("sdp-peering", {}).get("peer-sap-id"),
+
+ "dst_node_ip": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+ .get("slice-service", [{}])[0]
+ .get("sdps", {}).get("sdp", [{}, {}])[1]
+ .get("attachment-circuits", {}).get("attachment-circuit", [{}])[0]
+ .get("sdp-peering", {}).get("peer-sap-id"),
+
+ "vlan_id": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+ .get("slice-service", [{}])[0]
+ .get("sdps", {}).get("sdp", [{}])[0]
+ .get("service-match-criteria", {}).get("match-criterion", [{}])[0]
+ .get("value"),
+
+ "bandwidth": bandwidth,
+ "latency": latency,
+ "tolerance": tolerance,
+
+ "latency_version": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+ .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
+ .get("description"),
+
+ "reliability": ietf_intent.get("ietf-network-slice-service:network-slice-services", {})
+ .get("slo-sle-templates", {}).get("slo-sle-template", [{}])[0]
+ .get("sle-policy", {}).get("reliability"),
+ }
+
+ logging.info(f"IXIA Intent realized\n")
+ return intent
\ No newline at end of file
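A minimal, illustrative call of the extracted ixia() helper; the intent below contains only the fields the function reads, with made-up addresses and bounds:

from src.realizer.ixia.main import ixia

ietf_intent = {
    "ietf-network-slice-service:network-slice-services": {
        "slo-sle-templates": {"slo-sle-template": [{
            "description": "latency-v1",
            "sle-policy": {"reliability": "99.99"},
            "slo-policy": {"metric-bound": [
                {"metric-type": "one-way-bandwidth", "metric-unit": "kbps", "bound": 2000},
                {"metric-type": "one-way-delay-maximum", "metric-unit": "milliseconds", "bound": 10},
            ]},
        }]},
        "slice-service": [{"sdps": {"sdp": [
            {"attachment-circuits": {"attachment-circuit": [{"sdp-peering": {"peer-sap-id": "10.60.125.44"}}]},
             "service-match-criteria": {"match-criterion": [{"value": "100"}]}},
            {"attachment-circuits": {"attachment-circuit": [{"sdp-peering": {"peer-sap-id": "10.60.125.45"}}]}},
        ]}}],
    },
}
print(ixia(ietf_intent))
# -> {'src_node_ip': '10.60.125.44', 'dst_node_ip': '10.60.125.45', 'vlan_id': '100',
#     'bandwidth': 2000, 'latency': 10, 'tolerance': None,
#     'latency_version': 'latency-v1', 'reliability': '99.99'}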
diff --git a/src/realizer/main.py b/src/realizer/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2e5be3e41f9724c9f908c8b4babbde8e7b200d2
--- /dev/null
+++ b/src/realizer/main.py
@@ -0,0 +1,27 @@
+from .select_way import select_way
+from .nrp_handler import nrp_handler
+
+def realizer(ietf_intent, need_nrp=False, order=None, nrp=None, controller_type=None, response=None):
+ """
+ Manage the slice creation workflow.
+
+ This function handles two primary scenarios:
+ 1. Interaction with network controllers for NRP (Network Resource Partition) operations when need_nrp is True
+ 2. Slice service selection and realization when need_nrp is False
+
+ Args:
+ ietf_intent (dict): IETF-formatted network slice intent.
+ need_nrp (bool, optional): Flag to indicate if NRP operations are needed. Defaults to False.
+ order (str, optional): Type of NRP operation (READ, UPDATE, CREATE). Defaults to None.
+ nrp (dict, optional): Specific Network Resource Partition to operate on. Defaults to None.
+ controller_type (str, optional): Target controller ("TFS" or "IXIA"). Defaults to None.
+ response (dict, optional): Response object forwarded to the selected realizer. Defaults to None.
+
+ Returns:
+ dict: The NRP view when need_nrp is True, otherwise the realization request for the selected controller.
+ """
+ if need_nrp:
+ # Perform NRP-related operations
+ nrp_view = nrp_handler(order, nrp)
+ return nrp_view
+ else:
+ # Select slice service method
+ way = ietf_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"]["tag-type"]["value"]
+ # NOTE: the tag-type value above is currently overridden; realization is forced to L2VPN
+ way = "L2VPN"
+ request = select_way(controller=controller_type, way=way, ietf_intent=ietf_intent, response=response)
+ return request
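A brief usage sketch of the new realizer() entry point, assuming the static nrp_ddbb.json database is present:

from src.realizer.main import realizer

# NRP path: read the current NRP view (served from the static nrp_ddbb.json for now)
nrp_view = realizer(None, need_nrp=True, order="READ")
print(len(nrp_view), "NRPs in the current view")

# Realization path (sketch): realizer(ietf_intent, controller_type="TFS", response=...)
# dispatches to select_way(), which picks the TFS or IXIA realizer shown below.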
diff --git a/src/realizer/nrp_handler.py b/src/realizer/nrp_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba4c1fff9793d44081cd618ed25ea7a1849289b2
--- /dev/null
+++ b/src/realizer/nrp_handler.py
@@ -0,0 +1,56 @@
+import logging, os, json
+from src.config.constants import DATABASE_PATH
+
+def nrp_handler(request, nrp):
+ """
+ Manage Network Resource Partition (NRP) operations.
+
+ This method handles CRUD operations for Network Resource Partitions,
+ interacting with Network Controllers (currently done statically via a JSON-based database file).
+
+ Args:
+ request (str): The type of operation to perform.
+ Supported values:
+ - "CREATE": Add a new NRP to the database
+ - "READ": Retrieve the current NRP view
+ - "UPDATE": Update an existing NRP (currently a placeholder)
+
+ nrp (dict): The Network Resource Partition details to create or update.
+
+ Returns:
+ None or answer:
+ - For "CREATE": Returns the response from the controller (currently using a static JSON)
+ - For "READ": Gets the NRP view from the controller (currently using a static JSON)
+ - For "UPDATE": Placeholder for update functionality
+
+ Notes:
+ - Uses a local JSON file "nrp_ddbb.json" to store NRP information as controller operation is not yet defined
+ """
+ if request == "CREATE":
+ # TODO: Implement actual request to Controller to create an NRP
+ logging.debug("Creating NRP")
+
+ # Load existing NRP database
+ with open(os.path.join(DATABASE_PATH, "nrp_ddbb.json"), "r") as archivo:
+ nrp_view = json.load(archivo)
+
+ # Append new NRP to the view
+ nrp_view.append(nrp)
+
+ # Placeholder for controller POST request
+ answer = None
+ return answer
+ elif request == "READ":
+ # TODO: Request to Controller to get topology and current NRP view
+ logging.debug("Reading Topology")
+
+ # Load NRP database
+ with open(os.path.join(DATABASE_PATH, "nrp_ddbb.json"), "r") as archivo:
+ nrp_view = json.load(archivo)
+ return nrp_view
+
+ elif request == "UPDATE":
+ # TODO: Implement request to Controller to update NRP
+ logging.debug("Updating NRP")
+ answer = ""
\ No newline at end of file
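As the TODOs above note, CREATE currently appends only to an in-memory copy of the JSON view and returns a placeholder None; nothing is written back to nrp_ddbb.json. A quick check, with an illustrative NRP body:

from src.realizer.nrp_handler import nrp_handler

before = nrp_handler("READ", None)
assert nrp_handler("CREATE", {"id": "nrp-demo", "slos": []}) is None  # placeholder controller answer
after = nrp_handler("READ", None)
assert len(after) == len(before)                                      # nrp_ddbb.json on disk is unchanged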
diff --git a/src/realizer/select_way.py b/src/realizer/select_way.py
new file mode 100644
index 0000000000000000000000000000000000000000..548dff19d19fdb7214890d1c7f0ea14729e787b3
--- /dev/null
+++ b/src/realizer/select_way.py
@@ -0,0 +1,33 @@
+import logging
+from .ixia.main import ixia
+from .tfs.main import tfs
+
+def select_way(controller=None, way=None, ietf_intent=None, response=None):
+ """
+ Determine the method of slice realization.
+
+ Args:
+ controller (str): The controller to use for slice realization.
+ Supported values:
+ - "IXIA": IXIA NEII for network testing
+ - "TFS": TeraFlow Service for network slice management
+ way (str): The type of technology to use.
+ Supported values:
+ - "L2VPN": Layer 2 Virtual Private Network
+ - "L3VPN": Layer 3 Virtual Private Network
+
+ ietf_intent (dict): IETF-formatted network slice intent.
+ response (dict, optional): Response object forwarded to the TFS realizer. Defaults to None.
+
+ Returns:
+ dict: A realization request for the specified network slice type.
+
+ """
+ realizing_request = None
+ if controller == "TFS":
+ realizing_request = tfs(ietf_intent, way, response)
+ elif controller == "IXIA":
+ realizing_request = ixia(ietf_intent)
+ else:
+ logging.warning(f"Unsupported controller: {controller}. Defaulting to TFS realization.")
+ realizing_request = tfs(ietf_intent, way, response)
+ return realizing_request
\ No newline at end of file
diff --git a/src/realizer/send_controller.py b/src/realizer/send_controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..53667c8564273ec3a483cc82738e2375eecfc82f
--- /dev/null
+++ b/src/realizer/send_controller.py
@@ -0,0 +1,15 @@
+import logging
+from flask import current_app
+from .tfs.tfs_connect import tfs_connect
+from .ixia.ixia_connect import ixia_connect
+
+def send_controller(controller_type, requests):
+ response = None
+ if current_app.config["DUMMY_MODE"]:
+ return True
+ if controller_type == "TFS":
+ response = tfs_connect(requests, current_app.config["TFS_IP"])
+ logging.info("Request sent to Teraflow")
+ elif controller_type == "IXIA":
+ response = ixia_connect(requests, current_app.config["IXIA_IP"])
+ logging.info("Requests sent to Ixia")
+ else:
+ logging.warning(f"Unsupported controller: {controller_type}; nothing sent")
+ return response
diff --git a/src/realizer/tfs/helpers/cisco_connector.py b/src/realizer/tfs/helpers/cisco_connector.py
new file mode 100644
index 0000000000000000000000000000000000000000..230e7cbd146870229b91e4f3b6dcfd75a7f15f5f
--- /dev/null
+++ b/src/realizer/tfs/helpers/cisco_connector.py
@@ -0,0 +1,80 @@
+import logging
+from netmiko import ConnectHandler
+
+class cisco_connector():
+ def __init__(self, address, configs=None):
+ self.address=address
+ self.configs=configs
+
+ def execute_commands(self, commands):
+ try:
+ # Device connection parameters
+ device = {
+ 'device_type': 'cisco_xr', # Depends on the device type (e.g. 'cisco_ios', 'cisco_xr', 'linux', etc.)
+ 'host': self.address,
+ 'username': 'cisco',
+ 'password': 'cisco12345',
+ }
+
+ # Connect over SSH
+ connection = ConnectHandler(**device)
+
+ # Send the configuration commands
+ output = connection.send_config_set(commands)
+ logging.debug(output)
+
+ # Close the connection
+ connection.disconnect()
+
+ except Exception as e:
+ logging.error(f"Failed to execute commands on {self.address}: {str(e)}")
+
+ def create_command_template(self, config):
+
+ commands = [
+ "l2vpn",
+ f"pw-class l2vpn_vpws_profile_example_{config['number']}",
+ "encapsulation mpls"
+ ]
+
+ commands.extend([
+ "transport-mode vlan passthrough",
+ "control-word"
+ ])
+
+ commands.extend([
+ f"preferred-path interface tunnel-te {config['number']}",
+ "exit",
+ "exit"
+ ])
+
+ commands.extend([
+ "xconnect group l2vpn_vpws_group_example",
+ f"p2p {config['ni_name']}",
+ f"interface {config['interface']}.{config['vlan']}",
+ f"neighbor ipv4 {config['remote_router']} pw-id {config['vlan']}",
+ "no pw-class l2vpn_vpws_profile_example",
+ f"pw-class l2vpn_vpws_profile_example_{config['number']}"
+ ])
+
+ return commands
+
+ def full_create_command_template(self):
+ commands =[]
+ for config in self.configs:
+ commands_temp = self.create_command_template(config)
+ commands.extend(commands_temp)
+ commands.append("commit")
+ commands.append("end")
+ return commands
+
+ def create_command_template_delete(self):
+ commands = [
+ "no l2vpn",
+ ]
+
+ commands.append("commit")
+ commands.append("end")
+
+ return commands
\ No newline at end of file
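A minimal usage sketch of cisco_connector; the addresses, VLAN and interface name are placeholders, and netmiko plus reachable devices are required for execute_commands to succeed:

from src.realizer.tfs.helpers.cisco_connector import cisco_connector

configs = [{
    "ni_name": "ELAN100",                    # network instance name, as built by the L2VPN realizer
    "remote_router": "10.60.125.45",         # placeholder peer address
    "interface": "GigabitEthernet0/0/0/0",   # placeholder access interface
    "vlan": 100,
    "number": 100 % 10 + 1,                  # same vlan % 10 + 1 derivation used elsewhere in this diff
}]
connector = cisco_connector("10.60.125.44", configs)
commands = connector.full_create_command_template()  # per-config command lines plus "commit"/"end"
connector.execute_commands(commands)                 # opens SSH via netmiko and pushes the configuration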
diff --git a/src/realizer/tfs/helpers/tfs_connector.py b/src/realizer/tfs/helpers/tfs_connector.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe52c3ad92976f27abae435d219fa4c1af13bd66
--- /dev/null
+++ b/src/realizer/tfs/helpers/tfs_connector.py
@@ -0,0 +1,38 @@
+import logging, requests, json
+
+class tfs_connector():
+ def webui_post(self, tfs_ip, service):
+ user="admin"
+ password="admin"
+ token=""
+ session = requests.Session()
+ session.auth = (user, password)
+ url=f'http://{tfs_ip}/webui'
+ response=session.get(url=url)
+ for item in response.iter_lines():
+ if("csrf_token" in str(item)):
+ string=str(item).split('")
@@ -89,16 +87,18 @@ class IxiaSlice(Resource):
def get(self, slice_id):
"""Retrieve a specific slice"""
controller = NSController(controller_type="IXIA")
- return controller.get_flows(slice_id)
+ data, code = Api(controller).get_flows(slice_id)
+ return data, code
@ixia_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
- @ixia_ns.response(200, "Transport network slice deleted successfully.")
+ @ixia_ns.response(204, "Transport network slice deleted successfully.")
@ixia_ns.response(404, "Transport network slice not found.")
@ixia_ns.response(500, "Internal server error")
def delete(self, slice_id):
"""Delete a slice"""
controller = NSController(controller_type="IXIA")
- return controller.delete_flows(slice_id)
+ data, code = Api(controller).delete_flows(slice_id)
+ return data, code
@ixia_ns.expect(slice_ddbb_model, validate=True)
@ixia_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
@@ -109,4 +109,5 @@ class IxiaSlice(Resource):
"""Modify a slice"""
json_data = request.get_json()
controller = NSController(controller_type="IXIA")
- return controller.modify_flow(slice_id, json_data)
\ No newline at end of file
+ data, code = Api(controller).modify_flow(slice_id, json_data)
+ return data, code
\ No newline at end of file
diff --git a/swagger/models/create_models.py b/swagger/models/create_models.py
index 94ca83bc53b978beb68512dd5959452375256f67..9e965bfdfafba73ae3d3d13a9d717f5d22364d30 100644
--- a/swagger/models/create_models.py
+++ b/swagger/models/create_models.py
@@ -300,27 +300,42 @@ def create_ietf_network_slice_nbi_yang_model(slice_ns):
slice_response_model = slice_ns.model(
"SliceResponse",
{
- "status": fields.String(description="Status of the request", example="success"),
- "slices": fields.List(
- fields.Nested(
- slice_ns.model(
- "SliceDetails",
- {
- "id": fields.String(description="Slice ID", example="CU-UP1_DU1"),
- "source": fields.String(description="Source IP", example="100.2.1.2"),
- "destination": fields.String(description="Destination IP", example="100.1.1.2"),
- "vlan": fields.String(description="VLAN ID", example="100"),
- "bandwidth(Mbps)": fields.Integer(
- description="Bandwidth in Mbps", example=120
- ),
- "latency(ms)": fields.Integer(
- description="Latency in milliseconds", example=4
+ "success": fields.Boolean(description="Indicates if the request was successful", example=True),
+ "data": fields.Nested(
+ slice_ns.model(
+ "SliceData",
+ {
+ "slices": fields.List(
+ fields.Nested(
+ slice_ns.model(
+ "SliceDetails",
+ {
+ "id": fields.String(description="Slice ID", example="slice-service-11327140-7361-41b3-aa45-e84a7fb40be9"),
+ "source": fields.String(description="Source IP", example="10.60.11.3"),
+ "destination": fields.String(description="Destination IP", example="10.60.60.105"),
+ "vlan": fields.String(description="VLAN ID", example="100"),
+ "requirements": fields.List(
+ fields.Nested(
+ slice_ns.model(
+ "SliceRequirement",
+ {
+ "constraint_type": fields.String(description="Type of constraint", example="one-way-bandwidth[kbps]"),
+ "constraint_value": fields.String(description="Constraint value", example="2000")
+ }
+ )
+ ),
+ description="List of requirements for the slice"
+ )
+ }
+ )
),
- },
- )
- ),
- description="List of slices",
+ description="List of slices"
+ ),
+ "setup_time": fields.Float(description="Slice setup time in milliseconds", example=12.57),
+ }
+ )
),
- },
+ "error": fields.String(description="Error message if request failed", example=None)
+ }
)
return slice_ddbb_model, slice_response_model
\ No newline at end of file
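For reference, a payload conforming to the reworked SliceResponse envelope would look like the following; the field values are the examples declared in the model above.

example_response = {
    "success": True,
    "data": {
        "slices": [{
            "id": "slice-service-11327140-7361-41b3-aa45-e84a7fb40be9",
            "source": "10.60.11.3",
            "destination": "10.60.60.105",
            "vlan": "100",
            "requirements": [
                {"constraint_type": "one-way-bandwidth[kbps]", "constraint_value": "2000"},
            ],
        }],
        "setup_time": 12.57,
    },
    "error": None,
}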
diff --git a/swagger/tfs_namespace.py b/swagger/tfs_namespace.py
index c9c3e07f591d13390df92712a746843a8d2326bd..208da1852b5ba7ac8cd21c18ec077f1be91e01c3 100644
--- a/swagger/tfs_namespace.py
+++ b/swagger/tfs_namespace.py
@@ -16,7 +16,8 @@
from flask import request
from flask_restx import Namespace, Resource, fields, reqparse
-from src.network_slice_controller import NSController
+from src.main import NSController
+from src.api.main import Api
import json
from swagger.models.create_models import create_gpp_nrm_28541_model, create_ietf_network_slice_nbi_yang_model
@@ -33,8 +34,8 @@ gpp_network_slice_request_model = create_gpp_nrm_28541_model(tfs_ns)
slice_ddbb_model, slice_response_model = create_ietf_network_slice_nbi_yang_model(tfs_ns)
upload_parser = reqparse.RequestParser()
-upload_parser.add_argument('file', location='files', type='FileStorage', help="Archivo a subir")
-upload_parser.add_argument('json_data', location='form', help="Datos JSON en formato string")
+upload_parser.add_argument('file', location='files', type='FileStorage', help="File to upload")
+upload_parser.add_argument('json_data', location='form', help="JSON Data in string format")
# Namespace Controllers
@tfs_ns.route("/slice")
@@ -46,10 +47,11 @@ class TfsSliceList(Resource):
def get(self):
"""Retrieve all slices"""
controller = NSController(controller_type="TFS")
- return controller.get_flows()
+ data, code = Api(controller).get_flows()
+ return data, code
@tfs_ns.doc(summary="Submit a transport network slice request", description="This endpoint allows clients to submit transport network slice requests using a JSON payload.")
- @tfs_ns.response(200, "Slice request successfully processed", slice_response_model)
+ @tfs_ns.response(201, "Slice created successfully", slice_response_model)
@tfs_ns.response(400, "Invalid request format")
@tfs_ns.response(500, "Internal server error")
@tfs_ns.expect(upload_parser)
@@ -62,12 +64,20 @@ class TfsSliceList(Resource):
uploaded_file = request.files.get('file')
if uploaded_file:
if not uploaded_file.filename.endswith('.json'):
- return {"error": "Only JSON files allowed"}, 400
+ return {
+ "success": False,
+ "data": None,
+ "error": "Only JSON files allowed"
+ }, 400
try:
json_data = json.load(uploaded_file) # Convert file to JSON
except json.JSONDecodeError:
- return {"error": "JSON file not valid"}, 400
+ return {
+ "success": False,
+ "data": None,
+ "error": "JSON file not valid"
+ }, 400
# If no file was uploaded, try to get the JSON data from the form
if json_data is None:
@@ -76,23 +86,33 @@ class TfsSliceList(Resource):
try:
json_data = json.loads(raw_json) # Convert string to JSON
except json.JSONDecodeError:
- return {"error": "JSON file not valid"}, 400
+ return {
+ "success": False,
+ "data": None,
+ "error": "JSON file not valid"
+ }, 400
# If no JSON data was found, return an error
if json_data is None:
- return {"error": "No data sent"}, 400
+ return {
+ "success": False,
+ "data": None,
+ "error": "No data sent"
+ }, 400
# Process the JSON data with the NSController
controller = NSController(controller_type="TFS")
- return controller.add_flow(json_data)
+ data, code = Api(controller).add_flow(json_data)
+ return data, code
@tfs_ns.doc(summary="Delete all transport network slices", description="Deletes all transport network slices from the slice controller.")
- @tfs_ns.response(200, "All transport network slices deleted successfully.")
+ @tfs_ns.response(204, "All transport network slices deleted successfully.")
@tfs_ns.response(500, "Internal server error")
def delete(self):
"""Delete all slices"""
controller = NSController(controller_type="TFS")
- return controller.delete_flows()
+ data, code = Api(controller).delete_flows()
+ return data, code
@tfs_ns.route("/slice/")
@@ -105,26 +125,29 @@ class TfsSlice(Resource):
def get(self, slice_id):
"""Retrieve a specific slice"""
controller = NSController(controller_type="TFS")
- return controller.get_flows(slice_id)
+ data, code = Api(controller).get_flows(slice_id)
+ return data, code
@tfs_ns.doc(summary="Delete a specific transport network slice", description="Deletes a specific transport network slice from the slice controller based on the provided `slice_id`.")
- @tfs_ns.response(200, "Transport network slice deleted successfully.")
+ @tfs_ns.response(204, "Transport network slice deleted successfully.")
@tfs_ns.response(404, "Transport network slice not found.")
@tfs_ns.response(500, "Internal server error")
def delete(self, slice_id):
"""Delete a slice"""
controller = NSController(controller_type="TFS")
- return controller.delete_flows(slice_id)
+ data, code = Api(controller).delete_flows(slice_id)
+ return data, code
@tfs_ns.expect(slice_ddbb_model, validate=True)
@tfs_ns.doc(summary="Modify a specific transport network slice", description="Returns a specific slice that has been modified")
- @tfs_ns.response(200, "Slice modified", slice_ddbb_model)
+ @tfs_ns.response(200, "Slice modified", slice_response_model)
@tfs_ns.response(404, "Transport network slice not found.")
@tfs_ns.response(500, "Internal server error")
def put(self, slice_id):
"""Modify a slice"""
json_data = request.get_json()
controller = NSController(controller_type="TFS")
- return controller.modify_flow(slice_id, json_data)
+ data, code = Api(controller).modify_flow(slice_id, json_data)
+ return data, code
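A small client-side sketch against the reworked TFS endpoints, assuming the controller responses follow the SliceResponse envelope documented above; the base URL is a placeholder and slice_request.json stands for any valid slice request file:

import requests

BASE = "http://localhost:8081/tfs"  # placeholder: adjust host, port and prefix to your deployment

# Submit a slice request as the 'json_data' form field (matching upload_parser above)
with open("slice_request.json") as f:
    r = requests.post(f"{BASE}/slice", data={"json_data": f.read()})
print(r.status_code)  # 201 on success, 400 on malformed input (as documented above)
print(r.json())       # success / data / error envelope

# Retrieve and delete a specific slice
slice_id = r.json()["data"]["slices"][0]["id"]
print(requests.get(f"{BASE}/slice/{slice_id}").json())
print(requests.delete(f"{BASE}/slice/{slice_id}").status_code)  # 204 when deleted (as documented above)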