diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..a50341ed8b82e398eaec3a3f02ed93d75168b411 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,81 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +stages: + - build + - unit_test + +# Build, tag, and push the Docker image to the GitLab Docker registry +build nsc: + variables: + IMAGE_NAME: 'nsc' + IMAGE_TAG: 'test' + stage: build + before_script: + - docker image prune --force + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./Dockerfile . + - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + after_script: + - docker image prune --force + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + +# Apply unit test to the component +unit_test nsc: + timeout: 15m + variables: + IMAGE_NAME: 'nsc' # name of the microservice + IMAGE_TAG: 'test' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build nsc + before_script: + # Do Docker cleanup + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker image prune --force + - docker network prune --force + - docker volume prune --all --force + - docker buildx prune --force + + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker run --name $IMAGE_NAME -d -p 8081:8081 -v "$PWD/src/tests:/opt/results" $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - sleep 5 + - docker ps -a + - docker logs $IMAGE_NAME + - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/report.xml" + - docker exec -i $IMAGE_NAME bash -c "coverage report --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + # Clean up + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker network prune --force + - docker volume prune --all --force + - docker image prune --force + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + artifacts: + when: always + reports: + junit: src/tests/report.xml diff --git a/README.md b/README.md index 
2208b703c0b744ab8c9b965f58ca8cbdede0db8e..c05f28cfeff08dd67c34bfa3df805c7f5dc744d1 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ The Network Slice Controller (NSC) is a component defined by the IETF to orchest - [Ixia Configuration](#ixia-configuration) - [WebUI](#webui-1) 9. [Usage](#usage) +10. [Available Branches and Releases](#available-branches-and-releases) --- @@ -175,3 +176,16 @@ To use the NSC, just build the image an run it in a container following these st Send slice requests via **API** (/nsc) or **WebUI** (/webui) +## Available Branches and Releases + +[![Latest Release](https://labs.etsi.org/rep/tfs/nsc/-/badges/release.svg)](https://labs.etsi.org/rep/tfs/nsc/-/releases) + +- The branch `main` ([![pipeline status](https://labs.etsi.org/rep/tfs/nsc/badges/main/pipeline.svg)](https://labs.etsi.org/rep/tfs/nsc/-/commits/main) [![coverage report](https://labs.etsi.org/rep/tfs/nsc/badges/main/coverage.svg)](https://labs.etsi.org/rep/tfs/nsc/-/commits/main)) always points to the latest stable version of the TeraFlowSDN Network Slice Controller (NSC). + +- The branches `release/X.Y.Z` point to the code of the release version indicated in the branch name. + - Code in these branches can be considered stable, and no new features are planned. + - In case of bugs, point releases increasing the revision number (Z) might be created. + +- The `develop` branch ([![pipeline status](https://labs.etsi.org/rep/tfs/nsc/badges/develop/pipeline.svg)](https://labs.etsi.org/rep/tfs/nsc/-/commits/develop) [![coverage report](https://labs.etsi.org/rep/tfs/nsc/badges/develop/coverage.svg)](https://labs.etsi.org/rep/tfs/nsc/-/commits/develop)) is the main development branch and contains the latest contributions. + - **Use it with care! It might not be stable.** + - The latest developments and contributions are added to this branch for testing and validation before reaching a release.
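For reference, sending a slice request to the API mentioned in the README could look like the following minimal sketch. This is hypothetical client code, not part of the PR: it assumes the NSC container is reachable on port 8081 (as mapped in the CI `unit_test nsc` job), that the `/nsc` endpoint accepts an IETF network-slice intent via POST, and it reuses the `src/tests/requests/slice_request.json` fixture added in this change; check the actual Flask-RESTX routes in `src/api` for the exact path and method.

```python
# Hypothetical usage sketch (not part of this diff): POST an IETF slice intent to the NSC API.
# Assumes port 8081 (as in the CI job) and that /nsc accepts POST with a JSON body.
import json
import requests

with open("src/tests/requests/slice_request.json") as f:
    intent = json.load(f)

resp = requests.post("http://localhost:8081/nsc", json=intent, timeout=10)
print(resp.status_code, resp.text)
```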
diff --git a/requirements.txt b/requirements.txt index 16226286ce03b4681ea504f0c75e8279a6e43056..6e8674fea7327e70902540cd2b1847086d5d1176 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,4 +20,6 @@ flask-restx netmiko requests pandas -dotenv \ No newline at end of file +dotenv +coverage +pytest diff --git a/src/api/main.py b/src/api/main.py index 47b6344df24dce90ab0bade54205af9cc0d33f01..8cf60b81ebebba19ee02f8599db94644f91a6e98 100644 --- a/src/api/main.py +++ b/src/api/main.py @@ -134,6 +134,9 @@ class Api: message="Slice modified successfully", data=result ) + except ValueError as e: + # Handle case where no slices are found + return send_response(False, code=404, message=str(e)) except Exception as e: # Handle unexpected errors return send_response(False, code=500, message=str(e)) diff --git a/src/mapper/slo_viability.py b/src/mapper/slo_viability.py index 92b215ab2e4fcf588de475f43b438457aec2f3a7..a91b9296e6d67c0270e232faea491bb7f7d1a914 100644 --- a/src/mapper/slo_viability.py +++ b/src/mapper/slo_viability.py @@ -39,6 +39,7 @@ def slo_viability(slice_slos, nrp_slos): "one-way-packet-loss", "two-way-packet-loss"], "min": ["one-way-bandwidth", "two-way-bandwidth", "shared-bandwidth"] } + score = 0 flexibility_scores = [] for slo in slice_slos: for nrp_slo in nrp_slos['slos']: diff --git a/src/planner/hrat_planner/hrat.py b/src/planner/hrat_planner/hrat.py index 2686af2fd28b7b213f1f1bd1596d3edf9c01ea49..0b370d7430016c0e4c2effff8de6bbbbd87f8bd2 100644 --- a/src/planner/hrat_planner/hrat.py +++ b/src/planner/hrat_planner/hrat.py @@ -63,10 +63,9 @@ def hrat_planner(data: str, ip: str, action: str = "create") -> dict: ] } } - response = requests.delete(url, headers=headers, json=payload, timeout=15) + response = requests.delete(url, headers=headers, json=payload, timeout=1) elif action == "create": - # Send creation request with full intent data - response = requests.post(url, headers=headers, json=data, timeout=15) + response = requests.post(url, headers=headers, json=data, timeout=1) else: logging.error("Invalid action. Use 'create' or 'delete'.") return data_static diff --git a/src/planner/tfs_optical_planner/tfs_optical.py b/src/planner/tfs_optical_planner/tfs_optical.py index 248991583b1072cb18f71af526fb7d0654b7fced..0d5bc79a76f9765ce6f2826f0e6b5d2a0f6a346c 100644 --- a/src/planner/tfs_optical_planner/tfs_optical.py +++ b/src/planner/tfs_optical_planner/tfs_optical.py @@ -176,7 +176,7 @@ def send_request(source, destination, ip): logging.debug(f"Payload for path computation: {json.dumps(payload, indent=2)}") try: - response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=15) + response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=1) return json.loads(response.text) except requests.exceptions.RequestException: logging.warning("Error connecting to the Optical Planner service. 
Skipping optical planning.") diff --git a/src/tests/requests/3ggpp_template_green.json b/src/tests/requests/3ggpp_template_green.json new file mode 100644 index 0000000000000000000000000000000000000000..67a1367b093b84c1dd589c803cee55cb130ce232 --- /dev/null +++ b/src/tests/requests/3ggpp_template_green.json @@ -0,0 +1,176 @@ +{ + "NetworkSlice1": { + "operationalState": "", + "administrativeState": "", + "serviceProfileList": [], + "networkSliceSubnetRef": "TopSliceSubnet1" + }, + "TopSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "TOP_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "TopId", + "pLMNInfoList": null, + "TopSliceSubnetProfile": { + "EnergyEfficiency": 400, + "EnergyConsumption": 200, + "RenewableEnergyUsage": 0.5, + "CarbonEmissions": 100 + } + } + ], + "networkSliceSubnetRef": [ + "CNSliceSubnet1", + "RANSliceSubnet1" + ] + }, + "CNSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "CN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "CNId", + "pLMNInfoList": null, + "CNSliceSubnetProfile": { + "EnergyEfficiency": 400, + "EnergyConsumption": 200, + "RenewableEnergyUsage": 0.5, + "CarbonEmissions": 100 + } + } + ] + }, + "RANSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "RANId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 40, + "MaxThpt": 80 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 40, + "MaxThpt": 80 + }, + "dLLatency": 8, + "uLLatency": 8, + "EnergyEfficiency": 400, + "EnergyConsumption": 200, + "RenewableEnergyUsage": 0.5, + "CarbonEmissions": 100 + } + } + ], + "networkSliceSubnetRef": [ + "MidhaulSliceSubnet1" + ] + }, + "MidhaulSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "MidhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "EnergyEfficiency": 5, + "EnergyConsumption": 18000, + "RenewableEnergyUsage": 0.5, + "CarbonEmissions": 650 + } + } + ], + "EpTransport": [ + "EpTransport CU-UP1", + "EpTransport DU3" + ] + }, + "BackhaulSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "BackhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 40, + "MaxThpt": 80 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 40, + "MaxThpt": 80 + }, + "dLLatency": 8, + "uLLatency": 8, + "EnergyEfficiency": 400, + "EnergyConsumption": 200, + "RenewableEnergyUsage": 0.5, + "CarbonEmissions": 100 + } + } + ], + "EpTransport": [ + "EpTransport CU-UP2", + "EpTransport UPF" + ] + }, + "EpTransport CU-UP1": { + "IpAddress": "1.1.1.100", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "300" + }, + "NextHopInfo": "1.1.1.1", + "qosProfile": "5QI100", + "EpApplicationRef": [ + "EP_F1U CU-UP1" + ] + }, + "EP_F1U CU-UP1": { + "localAddress": "100.1.1.100", + "remoteAddress": "200.1.1.100", + "epTransportRef": [ + "EpTransport CU-UP1" + ] + }, + "EpTransport 
DU3": { + "IpAddress": "2.2.2.100", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "300" + }, + "NextHopInfo": "2.2.2.2", + "qosProfile": "5QI100", + "EpApplicationRef": [ + "EP_F1U DU3" + ] + }, + "EP_F1U DU3": { + "localAddress": "200.1.1.100", + "remoteAddress": "100.1.1.100", + "epTransportRef": [ + "EpTransport DU3" + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json new file mode 100644 index 0000000000000000000000000000000000000000..ed80ec01d699a6bea39f99b19af1e22550c1c851 --- /dev/null +++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul.json @@ -0,0 +1,267 @@ +{ + "NetworkSlice1": { + "operationalState": "", + "administrativeState": "", + "serviceProfileList": [], + "networkSliceSubnetRef": "TopSliceSubnet1" + }, + "TopSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "TOP_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "TopId", + "pLMNInfoList": null, + "TopSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "RANSliceSubnet1" + ] + }, + "RANSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "RANId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "BackhaulSliceSubnetN2", + "BackhaulSliceSubnetN31", + "BackhaulSliceSubnetN32" + ] + }, + "BackhaulSliceSubnetN2": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "BackhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 10, + "MaxThpt": 20 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 10, + "MaxThpt": 20 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "EpTransport": [ + "EpTransport CU-N2", + "EpTransport AMF-N2" + ] + }, + "BackhaulSliceSubnetN31": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "BackhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 100, + "MaxThpt": 200 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 50, + "MaxThpt": 100 + }, + "dLLatency": 10, + "uLLatency": 10 + } + } + ], + "EpTransport": [ + "EpTransport CU-N31", + "EpTransport UPF-N31" + ] + }, + "BackhaulSliceSubnetN32": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "BackhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 200, + "MaxThpt": 400 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 100, + "MaxThpt": 200 + }, + "dLLatency": 5, + "uLLatency": 5 
+ } + } + ], + "EpTransport": [ + "EpTransport CU-N32", + "EpTransport UPF-N32" + ] + }, + "EpTransport CU-N2": { + "IpAddress": "10.60.11.3", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "100" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "A", + "EpApplicationRef": [ + "EP_N2 CU-N2" + ] + }, + "EP_N2 CU-N2": { + "localAddress": "10.60.11.3", + "remoteAddress": "10.60.60.105", + "epTransportRef": [ + "EpTransport CU-N2" + ] + }, + "EpTransport AMF-N2": { + "IpAddress": "10.60.60.105", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "100" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "A", + "EpApplicationRef": [ + "EP_N2 AMF-N2" + ] + }, + "EP_N2 AMF-N2": { + "localAddress": "10.60.60.105", + "remoteAddress": "10.60.11.3", + "epTransportRef": [ + "EpTransport UPF-N2" + ] + }, + "EpTransport CU-N32": { + "IpAddress": "10.60.11.3", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "101" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "B", + "EpApplicationRef": [ + "EP_N3 CU-N32" + ] + }, + "EP_N3 CU-N32": { + "localAddress": "10.60.11.3", + "remoteAddress": "10.60.10.6", + "epTransportRef": [ + "EpTransport CU-N32" + ] + }, + "EpTransport UPF-N32": { + "IpAddress": "10.60.10.6", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "101" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "B", + "EpApplicationRef": [ + "EP_N3 UPF-N32" + ] + }, + "EP_N3 UPF-N32": { + "localAddress": "10.60.10.6", + "remoteAddress": "10.60.11.3", + "epTransportRef": [ + "EpTransport UPF-N32" + ] + }, + "EpTransport CU-N31": { + "IpAddress": "10.60.11.3", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "102" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "C", + "EpApplicationRef": [ + "EP_N3 CU-N31" + ] + }, + "EP_N3 CU-N31": { + "localAddress": "10.60.11.3", + "remoteAddress": "10.60.60.106", + "epTransportRef": [ + "EpTransport CU-N31" + ] + }, + "EpTransport UPF-N31": { + "IpAddress": "10.60.60.106", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "102" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "C", + "EpApplicationRef": [ + "EP_N3 UPF-N31" + ] + }, + "EP_N3 UPF-N31": { + "localAddress": "10.60.60.106", + "remoteAddress": "10.60.11.3", + "epTransportRef": [ + "EpTransport UPF-N31" + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json new file mode 100644 index 0000000000000000000000000000000000000000..9dab29465a9d992e795a50f1a063bbb5ca05104e --- /dev/null +++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_1.json @@ -0,0 +1,131 @@ +{ + "NetworkSlice1": { + "operationalState": "", + "administrativeState": "", + "serviceProfileList": [], + "networkSliceSubnetRef": "TopSliceSubnet1" + }, + "TopSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "TOP_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "TopId", + "pLMNInfoList": null, + "TopSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "RANSliceSubnet1" + ] + }, + "RANSliceSubnet1": { + 
"operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "RANId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "BackhaulSliceSubnetN2" + ] + }, + "BackhaulSliceSubnetN2": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "BackhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 1, + "MaxThpt": 2 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 1, + "MaxThpt": 2 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "EpTransport": [ + "EpTransport CU-N2", + "EpTransport AMF-N2" + ] + }, + "EpTransport CU-N2": { + "IpAddress": "10.60.11.3", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "100" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "A", + "EpApplicationRef": [ + "EP_N2 CU-N2" + ] + }, + "EP_N2 CU-N2": { + "localAddress": "10.60.11.3", + "remoteAddress": "10.60.60.105", + "epTransportRef": [ + "EpTransport CU-N2" + ] + }, + "EpTransport AMF-N2": { + "IpAddress": "10.60.60.105", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "100" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "A", + "EpApplicationRef": [ + "EP_N2 AMF-N2" + ] + }, + "EP_N2 AMF-N2": { + "localAddress": "10.60.60.105", + "remoteAddress": "10.60.11.3", + "epTransportRef": [ + "EpTransport UPF-N2" + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json new file mode 100644 index 0000000000000000000000000000000000000000..d287a04fbf1da5202daf1a71c98ff2c509535523 --- /dev/null +++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_2.json @@ -0,0 +1,131 @@ +{ + "NetworkSlice1": { + "operationalState": "", + "administrativeState": "", + "serviceProfileList": [], + "networkSliceSubnetRef": "TopSliceSubnet1" + }, + "TopSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "TOP_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "TopId", + "pLMNInfoList": null, + "TopSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "RANSliceSubnet1" + ] + }, + "RANSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "RANId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "BackhaulSliceSubnetN32" + ] + }, + "BackhaulSliceSubnetN32": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], 
+ "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "BackhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 200, + "MaxThpt": 400 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 100, + "MaxThpt": 200 + }, + "dLLatency": 5, + "uLLatency": 5 + } + } + ], + "EpTransport": [ + "EpTransport CU-N32", + "EpTransport UPF-N32" + ] + }, + "EpTransport CU-N32": { + "IpAddress": "10.60.11.3", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "101" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "B", + "EpApplicationRef": [ + "EP_N3 CU-N32" + ] + }, + "EP_N3 CU-N32": { + "localAddress": "10.60.11.3", + "remoteAddress": "10.60.10.6", + "epTransportRef": [ + "EpTransport CU-N32" + ] + }, + "EpTransport UPF-N32": { + "IpAddress": "10.60.10.6", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "101" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "B", + "EpApplicationRef": [ + "EP_N3 UPF-N32" + ] + }, + "EP_N3 UPF-N32": { + "localAddress": "10.60.10.6", + "remoteAddress": "10.60.11.3", + "epTransportRef": [ + "EpTransport UPF-N32" + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json new file mode 100644 index 0000000000000000000000000000000000000000..55232e8eb5a17bfa803995f0751933c82e1df9ec --- /dev/null +++ b/src/tests/requests/3gpp_template_UC1PoC2_backhaul_request_3.json @@ -0,0 +1,131 @@ +{ + "NetworkSlice1": { + "operationalState": "", + "administrativeState": "", + "serviceProfileList": [], + "networkSliceSubnetRef": "TopSliceSubnet1" + }, + "TopSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "TOP_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "TopId", + "pLMNInfoList": null, + "TopSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "RANSliceSubnet1" + ] + }, + "RANSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "RANId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 310, + "MaxThpt": 620 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 160, + "MaxThpt": 320 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "BackhaulSliceSubnetN31" + ] + }, + "BackhaulSliceSubnetN31": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "BackhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 100, + "MaxThpt": 200 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 50, + "MaxThpt": 100 + }, + "dLLatency": 10, + "uLLatency": 10 + } + } + ], + "EpTransport": [ + "EpTransport CU-N31", + "EpTransport UPF-N31" + ] + }, + "EpTransport CU-N31": { + "IpAddress": "10.60.11.3", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "102" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": 
"C", + "EpApplicationRef": [ + "EP_N3 CU-N31" + ] + }, + "EP_N3 CU-N31": { + "localAddress": "10.60.11.3", + "remoteAddress": "10.60.60.106", + "epTransportRef": [ + "EpTransport CU-N31" + ] + }, + "EpTransport UPF-N31": { + "IpAddress": "10.60.60.106", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "102" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "C", + "EpApplicationRef": [ + "EP_N3 UPF-N31" + ] + }, + "EP_N3 UPF-N31": { + "localAddress": "10.60.60.106", + "remoteAddress": "10.60.11.3", + "epTransportRef": [ + "EpTransport UPF-N31" + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json b/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json new file mode 100644 index 0000000000000000000000000000000000000000..300f8666fbb20d501052194393ab5232d22d3424 --- /dev/null +++ b/src/tests/requests/3gpp_template_UC1PoC2_midhaul.json @@ -0,0 +1,267 @@ +{ + "NetworkSlice1": { + "operationalState": "", + "administrativeState": "", + "serviceProfileList": [], + "networkSliceSubnetRef": "TopSliceSubnet1" + }, + "TopSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "TOP_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "TopId", + "pLMNInfoList": null, + "TopSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 410, + "MaxThpt": 820 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 210, + "MaxThpt": 420 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "RANSliceSubnet1" + ] + }, + "RANSliceSubnet1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "RANId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 410, + "MaxThpt": 820 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 210, + "MaxThpt": 220 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "networkSliceSubnetRef": [ + "MidhaulSliceSubnetF1c", + "MidhaulSliceSubnetF1u1", + "MidhaulSliceSubnetF1u2" + ] + }, + "MidhaulSliceSubnetF1c": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "MidhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 10, + "MaxThpt": 20 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 10, + "MaxThpt": 20 + }, + "dLLatency": 20, + "uLLatency": 20 + } + } + ], + "EpTransport": [ + "EpTransport CU-F1c", + "EpTransport DU-F1c" + ] + }, + "MidhaulSliceSubnetF1u1": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "MidhaulId", + "pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 200, + "MaxThpt": 400 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 100, + "MaxThpt": 200 + }, + "dLLatency": 5, + "uLLatency": 5 + } + } + ], + "EpTransport": [ + "EpTransport CU-F1u1", + "EpTransport DU-F1u1" + ] + }, + "MidhaulSliceSubnetF1u2": { + "operationalState": "", + "administrativeState": "", + "nsInfo": {}, + "managedFunctionRef": [], + "networkSliceSubnetType": "RAN_SLICESUBNET", + "SliceProfileList": [ + { + "sliceProfileId": "MidhaulId", + 
"pLMNInfoList": null, + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 200, + "MaxThpt": 400 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 100, + "MaxThpt": 200 + }, + "dLLatency": 10, + "uLLatency": 10 + } + } + ], + "EpTransport": [ + "EpTransport CU-F1u2", + "EpTransport DU-F1u2" + ] + }, + "EpTransport CU-F1c": { + "IpAddress": "10.60.10.2", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "100" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "A", + "EpApplicationRef": [ + "EP_F1C CU-F1c" + ] + }, + "EP_F1C CU-F1c": { + "localAddress": "10.60.10.2", + "remoteAddress": "10.60.11.2", + "epTransportRef": [ + "EpTransport CU-F1c" + ] + }, + "EpTransport DU-F1c": { + "IpAddress": "10.60.11.2", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "100" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "A", + "EpApplicationRef": [ + "EP_F1C DU-F1c" + ] + }, + "EP_F1C DU-F1c": { + "localAddress": "10.60.11.2", + "remoteAddress": "10.60.10.2", + "epTransportRef": [ + "EpTransport DU-F1c" + ] + }, + "EpTransport CU-F1u1": { + "IpAddress": "10.60.10.2", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "101" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "B", + "EpApplicationRef": [ + "EP_F1U CU-F1u1" + ] + }, + "EP_F1U CU-F1u1": { + "localAddress": "10.60.10.2", + "remoteAddress": "10.60.11.2", + "epTransportRef": [ + "EpTransport CU-F1c" + ] + }, + "EpTransport DU-F1u1": { + "IpAddress": "10.60.11.2", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "101" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "B", + "EpApplicationRef": [ + "EP_F1U DU-F1u1" + ] + }, + "EP_F1U DU-F1u1": { + "localAddress": "10.60.11.2", + "remoteAddress": "10.60.10.2", + "epTransportRef": [ + "EpTransport DU-F1u1" + ] + }, + "EpTransport CU-F1u2": { + "IpAddress": "10.60.10.2", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "102" + }, + "NextHopInfo": "5.5.5.5", + "qosProfile": "C", + "EpApplicationRef": [ + "EP_F1U CU-F1u2" + ] + }, + "EP_F1U CU-F1u2": { + "localAddress": "10.60.10.2", + "remoteAddress": "10.60.11.2", + "epTransportRef": [ + "EpTransport CU-F1u2" + ] + }, + "EpTransport DU-F1u2": { + "IpAddress": "10.60.11.2", + "logicalInterfaceInfo": { + "logicalInterfaceType": "VLAN", + "logicalInterfaceId": "102" + }, + "NextHopInfo": "4.4.4.4", + "qosProfile": "C", + "EpApplicationRef": [ + "EP_F1U DU-F1u2" + ] + }, + "EP_F1U DU-F1u2": { + "localAddress": "10.60.11.2", + "remoteAddress": "10.60.10.2", + "epTransportRef": [ + "EpTransport DU-F1u2" + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/P2MP.json b/src/tests/requests/P2MP.json new file mode 100644 index 0000000000000000000000000000000000000000..02875dcf74a61c00b953525b93814ccd1167891a --- /dev/null +++ b/src/tests/requests/P2MP.json @@ -0,0 +1,108 @@ +{ + "ietf-network-slice-service:network-slice-services": { + "slo-sle-templates": { + "slo-sle-template": [ + { + "id": "LOW-DELAY", + "description": "Prefer direct link: delay <= 2ms", + "slo-policy": { + "metric-bound": [ + { + "metric-type": "two-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": 2 + } + ] + } + } + ] + }, + "slice-service": [ + { + "id": "slice-long", + "description": "Slice tolerant to intermediate hops", + "slo-sle-policy": { + "slo-sle-template": "LOW-DELAY" + }, + "sdps": { + "sdp": [ + { + "id": "T1.2", + "node-id": "T1.2", + 
"attachment-circuits": { + "attachment-circuit": [ + { + "id": "ac-r1", + "ac-ipv4-address": "10.10.1.1", + "ac-ipv4-prefix-length": 24 + } + ] + } + }, + { + "id": "T1.1", + "node-id": "T1.1", + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "ac-r2", + "ac-ipv4-address": "10.10.2.1", + "ac-ipv4-prefix-length": 24 + } + ] + } + }, + { + "id": "T2.1", + "node-id": "T2.1", + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "ac-r3", + "ac-ipv4-address": "10.10.3.1", + "ac-ipv4-prefix-length": 24 + } + ] + } + }, + { + "id": "T1.3", + "node-id": "T1.3", + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "ac-r3", + "ac-ipv4-address": "10.10.4.1", + "ac-ipv4-prefix-length": 24 + } + ] + } + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "cg-long", + "connectivity-type": "ietf-vpn-common:any-to-any", + "connectivity-construct": [ + { + "id": "cc-p2mp", + "p2mp-sdp": { + "root-sdp-id": "T2.1", + "leaf-sdp-id": [ + "T1.1", + "T1.2", + "T1.3" + + ] + } + } + ] + } + ] + } + } + ] + } + } \ No newline at end of file diff --git a/src/tests/requests/create_slice_1.json b/src/tests/requests/create_slice_1.json new file mode 100644 index 0000000000000000000000000000000000000000..bcefe01525f7413a98f89c8fa5266b7c6cec8329 --- /dev/null +++ b/src/tests/requests/create_slice_1.json @@ -0,0 +1,81 @@ +{ + "ietf-network-slice-service:network-slice-services": { + "slo-sle-templates": { + "slo-sle-template": [ + { + "id": "LOW-DELAY", + "description": "optical-slice", + "slo-policy": { + "metric-bound": [ + { + "metric-type": "two-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": 2 + } + ] + } + } + ] + }, + "slice-service": [ + { + "id": "slice-long", + "description": "Slice tolerant to intermediate hops", + "slo-sle-policy": { + "slo-sle-template": "LOW-DELAY" + }, + "sdps": { + "sdp": [ + { + "id": "Ethernet110", + "node-id": "Phoenix-1", + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "ac-r1", + "ac-ipv4-address": "10.10.1.1", + "ac-ipv4-prefix-length": 24 + } + ] + } + }, + { + "id": "Ethernet220", + "node-id": "Phoenix-2", + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "ac-r2", + "ac-ipv4-address": "10.10.2.1", + "ac-ipv4-prefix-length": 24 + } + ] + } + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "cg-long", + "connectivity-type": "ietf-vpn-common:any-to-any", + "connectivity-construct": [ + { + "id": "cc-long", + "a2a-sdp": [ + { + "sdp-id": "Ethernet110" + }, + { + "sdp-id": "Ethernet220" + } + ] + } + ] + } + ] + } + } + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/ietf_green_request.json b/src/tests/requests/ietf_green_request.json new file mode 100644 index 0000000000000000000000000000000000000000..5edae753b82526b19b865cc8c834260d580679dd --- /dev/null +++ b/src/tests/requests/ietf_green_request.json @@ -0,0 +1,172 @@ +{ + "ietf-network-slice-service:network-slice-services": { + "slo-sle-templates": { + "slo-sle-template": [ + { + "id": "B", + "description": "", + "slo-policy": { + "metric-bound": [ + { + "metric-type": "energy_consumption", + "metric-unit": "kWh", + "bound": 20200 + }, + { + "metric-type": "energy_efficiency", + "metric-unit": "Wats/bps", + "bound": 6 + }, + { + "metric-type": "carbon_emission", + "metric-unit": "grams of CO2 per kWh", + "bound": 750 + }, + { + "metric-type": "renewable_energy_usage", + "metric-unit": "rate", + "bound": 0.5 + } + ] + }, + "sle-policy": { + "security": "", + "isolation": "", + 
"path-constraints": { + "service-functions": "", + "diversity": { + "diversity": { + "diversity-type": "" + } + } + } + } + } + ] + }, + "slice-service": [ + { + "id": "slice-service-88a585f7-a432-4312-8774-6210fb0b2342", + "description": "Transport network slice mapped with 3GPP slice NetworkSlice1", + "service-tags": { + "tag-type": [ + { + "tag-type": "service", + "tag-type-value": [ + "L2" + ] + } + ] + }, + "slo-sle-policy": { + "slo-sle-template": "B" + }, + "status": {}, + "sdps": { + "sdp": [ + { + "id": "A", + "geo-location": "", + "node-id": "CU-N32", + "sdp-ip-address": "10.60.11.3", + "tp-ref": "", + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": "VLAN", + "value": "101", + "target-connection-group-id": "CU-N32_UPF-N32" + } + ] + }, + "incoming-qos-policy": "", + "outgoing-qos-policy": "", + "sdp-peering": { + "peer-sap-id": "", + "protocols": "" + }, + "ac-svc-ref": [], + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "100", + "ac-ipv4-address": "10.60.11.3", + "ac-ipv4-prefix-length": 0, + "sdp-peering": { + "peer-sap-id": "4.4.4.4" + }, + "status": {} + } + ] + }, + "status": {}, + "sdp-monitoring": "" + }, + { + "id": "B", + "geo-location": "", + "node-id": "UPF-N32", + "sdp-ip-address": "10.60.10.6", + "tp-ref": "", + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": "VLAN", + "value": "101", + "target-connection-group-id": "CU-N32_UPF-N32" + } + ] + }, + "incoming-qos-policy": "", + "outgoing-qos-policy": "", + "sdp-peering": { + "peer-sap-id": "", + "protocols": "" + }, + "ac-svc-ref": [], + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "200", + "ac-ipv4-address": "10.60.10.6", + "ac-ipv4-prefix-length": 0, + "sdp-peering": { + "peer-sap-id": "5.5.5.5" + }, + "status": {} + } + ] + }, + "status": {}, + "sdp-monitoring": "" + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "CU-N32_UPF-N32", + "connectivity-type": "ietf-vpn-common:any-to-any", + "connectivity-construct": [ + { + "id": 1, + "a2a-sdp": [ + { + "sdp-id": "A" + }, + { + "sdp-id": "B" + } + ] + } + ], + "status": {} + } + ] + } + } + ] + } +} \ No newline at end of file diff --git a/src/tests/requests/l3vpn_test.json b/src/tests/requests/l3vpn_test.json new file mode 100644 index 0000000000000000000000000000000000000000..4564739ad6eea215a0a5b8892a597e110f706adc --- /dev/null +++ b/src/tests/requests/l3vpn_test.json @@ -0,0 +1,164 @@ +{ + "ietf-network-slice-service:network-slice-services": { + "slo-sle-templates": { + "slo-sle-template": [ + { + "id": "A", + "description": "", + "slo-policy": { + "metric-bound": [ + { + "metric-type": "one-way-bandwidth", + "metric-unit": "kbps", + "bound": 20000000.67 + }, + { + "metric-type": "one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": 5.5 + } + ], + "availability": 95, + "mtu": 1450 + }, + "sle-policy": { + "security": "", + "isolation": "", + "path-constraints": { + "service-functions": "", + "diversity": { + "diversity": { + "diversity-type": "" + } + } + } + } + } + ] + }, + "slice-service": [ + { + "id": "slice-service-91327140-7361-41b3-aa45-e84a7fb40b79", + "description": "Transport network slice mapped with 3GPP slice NetworkSlice1", + "service-tags": { + "tag-type": [ + { + "tag-type": "service", + "tag-type-value": [ + "L3" + ] + } + ] + }, + "slo-sle-policy": { + "slo-sle-template": "A" + }, + "status": {}, + "sdps": { + "sdp": [ + { + "id": "", + "geo-location": "", + "node-id": "CU-N2", + "sdp-ip-address": 
"10.60.11.3", + "tp-ref": "", + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": "VLAN", + "value": "100", + "target-connection-group-id": "CU-N2_AMF-N2" + } + ] + }, + "incoming-qos-policy": "", + "outgoing-qos-policy": "", + "sdp-peering": { + "peer-sap-id": "", + "protocols": "" + }, + "ac-svc-ref": [], + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "100", + "ac-ipv4-address": "10.60.11.3", + "ac-ipv4-prefix-length": 0, + "sdp-peering": { + "peer-sap-id": "1.1.1.1" + }, + "status": {} + } + ] + }, + "status": {}, + "sdp-monitoring": "" + }, + { + "id": "", + "geo-location": "", + "node-id": "AMF-N2", + "sdp-ip-address": "10.60.60.105", + "tp-ref": "", + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": "VLAN", + "value": "100", + "target-connection-group-id": "CU-N2_AMF-N2" + } + ] + }, + "incoming-qos-policy": "", + "outgoing-qos-policy": "", + "sdp-peering": { + "peer-sap-id": "", + "protocols": "" + }, + "ac-svc-ref": [], + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "200", + "ac-ipv4-address": "10.60.60.105", + "ac-ipv4-prefix-length": 0, + "sdp-peering": { + "peer-sap-id": "3.3.3.3" + }, + "status": {} + } + ] + }, + "status": {}, + "sdp-monitoring": "" + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "CU-N2_AMF-N2", + "connectivity-type": "ietf-vpn-common:any-to-any", + "connectivity-construct": [ + { + "id": 1, + "a2a-sdp": [ + { + "sdp-id": "01" + }, + { + "sdp-id": "02" + } + ] + } + ], + "status": {} + } + ] + } + } + ] + } + } \ No newline at end of file diff --git a/src/tests/requests/slice_request.json b/src/tests/requests/slice_request.json new file mode 100644 index 0000000000000000000000000000000000000000..f2150783ae098dc5e9511a986eb60c04f046282f --- /dev/null +++ b/src/tests/requests/slice_request.json @@ -0,0 +1,162 @@ +{ + "ietf-network-slice-service:network-slice-services": { + "slo-sle-templates": { + "slo-sle-template": [ + { + "id": "A", + "description": "", + "slo-policy": { + "metric-bound": [ + { + "metric-type": "one-way-bandwidth", + "metric-unit": "kbps", + "bound": 2000 + }, + { + "metric-type": "one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": 5 + } + ] + }, + "sle-policy": { + "security": "", + "isolation": "", + "path-constraints": { + "service-functions": "", + "diversity": { + "diversity": { + "diversity-type": "" + } + } + } + } + } + ] + }, + "slice-service": [ + { + "id": "slice-service-11327140-7361-41b3-aa45-e84a7fb40be9", + "description": "Transport network slice mapped with 3GPP slice NetworkSlice1", + "service-tags": { + "tag-type": [ + { + "tag-type": "service", + "tag-type-value": [ + "L2" + ] + } + ] + }, + "slo-sle-policy": { + "slo-sle-template": "A" + }, + "status": {}, + "sdps": { + "sdp": [ + { + "id": "", + "geo-location": "", + "node-id": "CU-N2", + "sdp-ip-address": "10.60.11.3", + "tp-ref": "", + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": "VLAN", + "value": "100", + "target-connection-group-id": "CU-N2_AMF-N2" + } + ] + }, + "incoming-qos-policy": "", + "outgoing-qos-policy": "", + "sdp-peering": { + "peer-sap-id": "", + "protocols": "" + }, + "ac-svc-ref": [], + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "100", + "ac-ipv4-address": "10.60.11.3", + "ac-ipv4-prefix-length": 0, + "sdp-peering": { + "peer-sap-id": "1.1.1.1" + }, + "status": {} + } + ] + }, + "status": {}, + "sdp-monitoring": "" + }, + { + "id": "", + 
"geo-location": "", + "node-id": "AMF-N2", + "sdp-ip-address": "10.60.60.105", + "tp-ref": "", + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": "VLAN", + "value": "100", + "target-connection-group-id": "CU-N2_AMF-N2" + } + ] + }, + "incoming-qos-policy": "", + "outgoing-qos-policy": "", + "sdp-peering": { + "peer-sap-id": "", + "protocols": "" + }, + "ac-svc-ref": [], + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "200", + "ac-ipv4-address": "10.60.60.105", + "ac-ipv4-prefix-length": 0, + "sdp-peering": { + "peer-sap-id": "3.3.3.3" + }, + "status": {} + } + ] + }, + "status": {}, + "sdp-monitoring": "" + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "CU-N2_AMF-N2", + "connectivity-type": "ietf-vpn-common:any-to-any", + "connectivity-construct": [ + { + "id": 1, + "a2a-sdp": [ + { + "sdp-id": "01" + }, + { + "sdp-id": "02" + } + ] + } + ], + "status": {} + } + ] + } + } + ] + } + } \ No newline at end of file diff --git a/src/tests/test_api.py b/src/tests/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..7264ab9b0ce115c8c318efda6e078334c6cb76b4 --- /dev/null +++ b/src/tests/test_api.py @@ -0,0 +1,301 @@ +import json +import pytest +import os +from unittest.mock import patch, Mock, MagicMock +from pathlib import Path +from dotenv import load_dotenv +import sqlite3 +import time +from flask import Flask +from src.main import NSController +from src.api.main import Api + + +# Load environment variables +load_dotenv() + +@pytest.fixture(scope="session") +def flask_app(): + """Crea una app Flask mínima para los tests.""" + app = Flask(__name__) + app.config.update({ + "TESTING": True, + "SERVER_NAME": "localhost", + 'NRP_ENABLED': os.getenv('NRP_ENABLED', 'False').lower() == 'true', + 'PLANNER_ENABLED': os.getenv('PLANNER_ENABLED', 'False').lower() == 'true', + 'PCE_EXTERNAL': os.getenv('PCE_EXTERNAL', 'False').lower() == 'true', + 'DUMMY_MODE': os.getenv('DUMMY_MODE', 'True').lower() == 'true', + 'DUMP_TEMPLATES': os.getenv('DUMP_TEMPLATES', 'False').lower() == 'true', + 'TFS_L2VPN_SUPPORT': os.getenv('TFS_L2VPN_SUPPORT', 'False').lower() == 'true', + 'WEBUI_DEPLOY': os.getenv('WEBUI_DEPLOY', 'True').lower() == 'true', + 'UPLOAD_TYPE': os.getenv('UPLOAD_TYPE', 'WEBUI'), + 'PLANNER_TYPE': os.getenv('PLANNER_TYPE', 'ENERGY'), + 'HRAT_IP' : os.getenv('HRAT_IP', '10.0.0.1'), + 'OPTICAL_PLANNER_IP' : os.getenv('OPTICAL_PLANNER_IP', '10.0.0.1') + }) + return app + + +@pytest.fixture(autouse=True) +def push_flask_context(flask_app): + """Empuja automáticamente un contexto Flask para cada test.""" + with flask_app.app_context(): + yield + +@pytest.fixture +def temp_db(tmp_path): + """Fixture to create and cleanup test database using SQLite instead of JSON.""" + test_db_name = str(tmp_path / "test_slice.db") + + # Create database with proper schema + conn = sqlite3.connect(test_db_name) + cursor = conn.cursor() + cursor.execute(""" + CREATE TABLE IF NOT EXISTS slice ( + slice_id TEXT PRIMARY KEY, + intent TEXT NOT NULL, + controller TEXT NOT NULL + ) + """) + conn.commit() + conn.close() + + yield test_db_name + + # Cleanup - properly close connections and remove file + try: + time.sleep(0.1) + if os.path.exists(test_db_name): + os.remove(test_db_name) + except Exception: + time.sleep(0.5) + try: + if os.path.exists(test_db_name): + os.remove(test_db_name) + except: + pass + + +@pytest.fixture +def env_variables(): + """Fixture to load and provide environment variables.""" + env_vars = { + 
'NRP_ENABLED': os.getenv('NRP_ENABLED', 'False').lower() == 'true', + 'PLANNER_ENABLED': os.getenv('PLANNER_ENABLED', 'False').lower() == 'true', + 'PCE_EXTERNAL': os.getenv('PCE_EXTERNAL', 'False').lower() == 'true', + 'DUMMY_MODE': os.getenv('DUMMY_MODE', 'True').lower() == 'true', + 'DUMP_TEMPLATES': os.getenv('DUMP_TEMPLATES', 'False').lower() == 'true', + 'TFS_L2VPN_SUPPORT': os.getenv('TFS_L2VPN_SUPPORT', 'False').lower() == 'true', + 'WEBUI_DEPLOY': os.getenv('WEBUI_DEPLOY', 'True').lower() == 'true', + 'UPLOAD_TYPE': os.getenv('UPLOAD_TYPE', 'WEBUI'), + 'PLANNER_TYPE': os.getenv('PLANNER_TYPE', 'standard'), + } + return env_vars + + +@pytest.fixture +def controller_with_mocked_db(temp_db): + """Create an NSController with a mocked database.""" + with patch('src.database.db.DB_NAME', temp_db): + yield NSController(controller_type="TFS") + + +@pytest.fixture +def ietf_intent(): + """Valid intent in IETF format.""" + return { + "ietf-network-slice-service:network-slice-services": { + "slo-sle-templates": { + "slo-sle-template": [ + { + "id": "qos1", + "slo-policy": { + "metric-bound": [ + { + "metric-type": "one-way-bandwidth", + "metric-unit": "kbps", + "bound": 1000 + } + ] + } + } + ] + }, + "slice-service": [ + { + "id": "slice-test-1", + "sdps": { + "sdp": [ + { + "sdp-ip-address": "10.0.0.1", + "node-id": "node1", + "service-match-criteria": { + "match-criterion": [ + { + "match-type": "vlan", + "value": "100" + } + ] + }, + "attachment-circuits": { + "attachment-circuit": [ + { + "sdp-peering": { + "peer-sap-id": "R1" + } + } + ] + }, + }, + { + "sdp-ip-address": "10.0.0.2", + "node-id": "node2", + "service-match-criteria": { + "match-criterion": [ + { + "match-type": "vlan", + "value": "100" + } + ] + }, + "attachment-circuits": { + "attachment-circuit": [ + { + "sdp-peering": { + "peer-sap-id": "R2" + } + } + ] + }, + }, + ] + }, + "service-tags": {"tag-type": {"value": "L3VPN"}}, + } + ], + } + } + + +class TestBasicApiOperations: + """Tests for basic API operations.""" + + def test_get_flows_empty(self, controller_with_mocked_db): + """Should return an error when there are no slices.""" + result, code = Api(controller_with_mocked_db).get_flows() + assert code == 404 + assert result["success"] is False + assert result["data"] is None + + def test_add_flow_success(self, controller_with_mocked_db, ietf_intent): + """Should add a flow successfully.""" + with patch('src.database.db.save_data') as mock_save: + result, code = Api(controller_with_mocked_db).add_flow(ietf_intent) + assert code == 201 + assert result["success"] is True + assert "slices" in result["data"] + + def test_add_and_get_flow(self, controller_with_mocked_db, ietf_intent): + """Should add a flow and then retrieve it.""" + with patch('src.database.db.save_data') as mock_save, \ + patch('src.database.db.get_all_data') as mock_get_all: + + Api(controller_with_mocked_db).add_flow(ietf_intent) + + mock_get_all.return_value = [ + { + "slice_id": "slice-test-1", + "intent": ietf_intent, + "controller": "TFS" + } + ] + + flows, code = Api(controller_with_mocked_db).get_flows() + assert code == 200 + assert any(s["slice_id"] == "slice-test-1" for s in flows) + + def test_modify_flow_success(self, controller_with_mocked_db, ietf_intent): + """Should modify an existing flow.""" + with patch('src.database.db.update_data') as mock_update: + Api(controller_with_mocked_db).add_flow(ietf_intent) + new_intent = ietf_intent.copy() +
new_intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["id"] = "qos2" + + result, code = Api(controller_with_mocked_db).modify_flow("slice-test-1", new_intent) + print(result) + assert code == 200 + assert result["success"] is True + + def test_delete_specific_flow_success(self, controller_with_mocked_db, ietf_intent): + """Debe borrar un flow concreto.""" + with patch('src.database.db.delete_data') as mock_delete: + Api(controller_with_mocked_db).add_flow(ietf_intent) + result, code = Api(controller_with_mocked_db).delete_flows("slice-test-1") + assert code == 204 + assert result == {} + + def test_delete_all_flows_success(self, controller_with_mocked_db): + """Debe borrar todos los flows.""" + with patch('src.database.db.delete_all_data') as mock_delete_all: + result, code = Api(controller_with_mocked_db).delete_flows() + assert code == 204 + assert result == {} + + def test_get_specific_flow(self, controller_with_mocked_db, ietf_intent): + """Debe poder recuperar un flow específico.""" + with patch('src.database.db.get_data') as mock_get: + Api(controller_with_mocked_db).add_flow(ietf_intent) + mock_get.return_value = { + "slice_id": "slice-test-1", + "intent": ietf_intent, + "controller": "TFS" + } + + result, code = Api(controller_with_mocked_db).get_flows("slice-test-1") + assert code == 200 + assert result["slice_id"] == "slice-test-1" + + +class TestErrorHandling: + """Tests for error handling.""" + + def test_add_flow_with_empty_intent(self, controller_with_mocked_db): + """Debe fallar si se pasa un intent vacío.""" + result, code = Api(controller_with_mocked_db).add_flow({}) + assert code in (400, 404, 500) + assert result["success"] is False + + def test_add_flow_with_none(self, controller_with_mocked_db): + """Debe fallar si se pasa None como intent.""" + result, code = Api(controller_with_mocked_db).add_flow(None) + assert code in (400, 500) + assert result["success"] is False + + def test_get_nonexistent_slice(self, controller_with_mocked_db): + """Debe devolver 404 si se pide un slice inexistente.""" + with patch('src.database.db.get_data') as mock_get: + mock_get.side_effect = ValueError("No slice found") + + result, code = Api(controller_with_mocked_db).get_flows("slice-does-not-exist") + assert code == 404 + assert result["success"] is False + + def test_modify_nonexistent_flow(self, controller_with_mocked_db, ietf_intent): + """Debe fallar si se intenta modificar un flow inexistente.""" + with patch('src.database.db.update_data') as mock_update: + mock_update.side_effect = ValueError("No slice found") + + result, code = Api(controller_with_mocked_db).modify_flow("nonexistent", ietf_intent) + assert code == 404 + assert result["success"] is False + + def test_delete_nonexistent_flow(self, controller_with_mocked_db): + """Debe fallar si se intenta eliminar un flow inexistente.""" + with patch('src.database.db.delete_data') as mock_delete: + mock_delete.side_effect = ValueError("No slice found") + + result, code = Api(controller_with_mocked_db).delete_flows("nonexistent") + assert code == 404 + assert result["success"] is False + + diff --git a/src/tests/test_database.py b/src/tests/test_database.py new file mode 100644 index 0000000000000000000000000000000000000000..06034eb6be1ea730e75878d6aba337c515093d83 --- /dev/null +++ b/src/tests/test_database.py @@ -0,0 +1,585 @@ +import pytest +import sqlite3 +import json +import os +import time +from unittest.mock import patch, MagicMock +from src.database.db import ( + init_db, 
+ save_data, + update_data, + delete_data, + get_data, + get_all_data, + delete_all_data, + DB_NAME +) +from src.database.store_data import store_data + + +@pytest.fixture +def test_db(tmp_path): + """Fixture to create and cleanup test database.""" + test_db_name = str(tmp_path / "test_slice.db") + + # Use test database + with patch('src.database.db.DB_NAME', test_db_name): + conn = sqlite3.connect(test_db_name) + cursor = conn.cursor() + cursor.execute(""" + CREATE TABLE IF NOT EXISTS slice ( + slice_id TEXT PRIMARY KEY, + intent TEXT NOT NULL, + controller TEXT NOT NULL + ) + """) + conn.commit() + conn.close() + + yield test_db_name + + # Cleanup - Close all connections and remove file + try: + # Force SQLite to release locks + sqlite3.connect(':memory:').execute('VACUUM').close() + + # Wait a moment for file locks to release + import time + time.sleep(0.1) + + # Remove the file if it exists + if os.path.exists(test_db_name): + os.remove(test_db_name) + except Exception as e: + # On Windows, sometimes files are locked. Try again after a delay + import time + time.sleep(0.5) + try: + if os.path.exists(test_db_name): + os.remove(test_db_name) + except: + pass # If it still fails, let pytest's tmp_path cleanup handle it + + +@pytest.fixture +def sample_intent(): + """Fixture providing sample network slice intent.""" + return { + "ietf-network-slice-service:network-slice-services": { + "slice-service": [{ + "id": "slice-service-12345", + "description": "Test network slice", + "service-tags": {"tag-type": {"value": "L2VPN"}}, + "sdps": { + "sdp": [{ + "node-id": "node1", + "sdp-ip-address": "10.0.0.1" + }] + } + }], + "slo-sle-templates": { + "slo-sle-template": [{ + "id": "profile1", + "slo-policy": { + "metric-bound": [{ + "metric-type": "one-way-bandwidth", + "metric-unit": "kbps", + "bound": 1000 + }] + } + }] + } + } + } + + +@pytest.fixture +def simple_intent(): + """Fixture providing simple intent for basic testing.""" + return { + "bandwidth": "1Gbps", + "latency": "10ms", + "provider": "opensec" + } + + +class TestInitDb: + """Tests for database initialization.""" + + def test_init_db_creates_table(self, tmp_path): + """Test that init_db creates the slice table.""" + test_db = str(tmp_path / "test.db") + + with patch('src.database.db.DB_NAME', test_db): + init_db() + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='slice'") + result = cursor.fetchone() + conn.close() + time.sleep(0.05) # Brief pause for file lock release + + assert result is not None + assert result[0] == 'slice' + + def test_init_db_creates_correct_columns(self, tmp_path): + """Test that init_db creates table with correct columns.""" + test_db = str(tmp_path / "test.db") + + with patch('src.database.db.DB_NAME', test_db): + init_db() + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("PRAGMA table_info(slice)") + columns = cursor.fetchall() + conn.close() + time.sleep(0.05) + + column_names = [col[1] for col in columns] + assert "slice_id" in column_names + assert "intent" in column_names + assert "controller" in column_names + + def test_init_db_idempotent(self, tmp_path): + """Test that init_db can be called multiple times without error.""" + test_db = str(tmp_path / "test.db") + + with patch('src.database.db.DB_NAME', test_db): + init_db() + init_db() # Should not raise error + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT name FROM sqlite_master WHERE 
type='table' AND name='slice'") + result = cursor.fetchone() + conn.close() + time.sleep(0.05) + + assert result is not None + + +class TestSaveData: + """Tests for save_data function.""" + + def test_save_data_success(self, test_db, simple_intent): + """Test successful data saving.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-001",)) + result = cursor.fetchone() + conn.close() + + assert result is not None + assert result[0] == "slice-001" + assert result[2] == "TFS" + assert json.loads(result[1]) == simple_intent + + def test_save_data_with_complex_intent(self, test_db, sample_intent): + """Test saving complex nested intent structure.""" + with patch('src.database.db.DB_NAME', test_db): + slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] + save_data(slice_id, sample_intent, "IXIA") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT intent FROM slice WHERE slice_id = ?", (slice_id,)) + result = cursor.fetchone() + conn.close() + + retrieved_intent = json.loads(result[0]) + assert retrieved_intent == sample_intent + + def test_save_data_duplicate_slice_id_raises_error(self, test_db, simple_intent): + """Test that saving duplicate slice_id raises ValueError.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + + with pytest.raises(ValueError, match="already exists"): + save_data("slice-001", simple_intent, "TFS") + + def test_save_data_multiple_slices(self, test_db, simple_intent): + """Test saving multiple different slices.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + save_data("slice-002", simple_intent, "IXIA") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT COUNT(*) FROM slice") + count = cursor.fetchone()[0] + conn.close() + + assert count == 2 + + def test_save_data_with_different_controllers(self, test_db, simple_intent): + """Test saving data with different controller types.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-tfs", simple_intent, "TFS") + save_data("slice-ixia", simple_intent, "IXIA") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-tfs",)) + tfs_result = cursor.fetchone() + cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-ixia",)) + ixia_result = cursor.fetchone() + conn.close() + + assert tfs_result[0] == "TFS" + assert ixia_result[0] == "IXIA" + + +class TestUpdateData: + """Tests for update_data function.""" + + def test_update_data_success(self, test_db, simple_intent): + """Test successful data update.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + + updated_intent = {"bandwidth": "2Gbps", "latency": "5ms", "provider": "opensec"} + update_data("slice-001", updated_intent, "TFS") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT intent FROM slice WHERE slice_id = ?", ("slice-001",)) + result = cursor.fetchone() + conn.close() + + retrieved_intent = json.loads(result[0]) + assert retrieved_intent == updated_intent + + def test_update_data_nonexistent_slice_raises_error(self, test_db, simple_intent): + """Test that updating nonexistent 
slice raises ValueError.""" + with patch('src.database.db.DB_NAME', test_db): + with pytest.raises(ValueError, match="No slice found"): + update_data("nonexistent-slice", simple_intent, "TFS") + + def test_update_data_controller_type(self, test_db, simple_intent): + """Test updating controller type.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + update_data("slice-001", simple_intent, "IXIA") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT controller FROM slice WHERE slice_id = ?", ("slice-001",)) + result = cursor.fetchone() + conn.close() + + assert result[0] == "IXIA" + + def test_update_data_complex_intent(self, test_db, sample_intent): + """Test updating with complex nested structure.""" + with patch('src.database.db.DB_NAME', test_db): + slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] + save_data(slice_id, sample_intent, "TFS") + + updated_sample = sample_intent.copy() + updated_sample["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = "Updated description" + + update_data(slice_id, updated_sample, "IXIA") + + retrieved = get_data(slice_id) + assert retrieved["intent"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] == "Updated description" + assert retrieved["controller"] == "IXIA" + + +class TestDeleteData: + """Tests for delete_data function.""" + + def test_delete_data_success(self, test_db, simple_intent): + """Test successful data deletion.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + delete_data("slice-001") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-001",)) + result = cursor.fetchone() + conn.close() + + assert result is None + + def test_delete_data_nonexistent_slice_raises_error(self, test_db): + """Test that deleting nonexistent slice raises ValueError.""" + with patch('src.database.db.DB_NAME', test_db): + with pytest.raises(ValueError, match="No slice found"): + delete_data("nonexistent-slice") + + def test_delete_data_multiple_slices(self, test_db, simple_intent): + """Test deleting one slice doesn't affect others.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + save_data("slice-002", simple_intent, "IXIA") + + delete_data("slice-001") + + conn = sqlite3.connect(test_db) + cursor = conn.cursor() + cursor.execute("SELECT COUNT(*) FROM slice") + count = cursor.fetchone()[0] + cursor.execute("SELECT * FROM slice WHERE slice_id = ?", ("slice-002",)) + remaining = cursor.fetchone() + conn.close() + + assert count == 1 + assert remaining[0] == "slice-002" + + +class TestGetData: + """Tests for get_data function.""" + + def test_get_data_success(self, test_db, simple_intent): + """Test retrieving existing data.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + result = get_data("slice-001") + + assert result["slice_id"] == "slice-001" + assert result["intent"] == simple_intent + assert result["controller"] == "TFS" + + def test_get_data_nonexistent_raises_error(self, test_db): + """Test that getting nonexistent slice raises ValueError.""" + with patch('src.database.db.DB_NAME', test_db): + with pytest.raises(ValueError, match="No slice found"): + get_data("nonexistent-slice") + + def 
test_get_data_json_parsing(self, test_db, sample_intent): + """Test that returned intent is parsed JSON.""" + with patch('src.database.db.DB_NAME', test_db): + slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] + save_data(slice_id, sample_intent, "TFS") + result = get_data(slice_id) + + assert isinstance(result["intent"], dict) + assert result["intent"] == sample_intent + + def test_get_data_returns_all_fields(self, test_db, simple_intent): + """Test that get_data returns all fields.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + result = get_data("slice-001") + + assert "slice_id" in result + assert "intent" in result + assert "controller" in result + assert len(result) == 3 + + +class TestGetAllData: + """Tests for get_all_data function.""" + + def test_get_all_data_empty_database(self, test_db): + """Test retrieving all data from empty database.""" + with patch('src.database.db.DB_NAME', test_db): + result = get_all_data() + assert result == [] + + def test_get_all_data_single_slice(self, test_db, simple_intent): + """Test retrieving all data with single slice.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + result = get_all_data() + + assert len(result) == 1 + assert result[0]["slice_id"] == "slice-001" + assert result[0]["intent"] == simple_intent + + def test_get_all_data_multiple_slices(self, test_db, simple_intent): + """Test retrieving all data with multiple slices.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + save_data("slice-002", simple_intent, "IXIA") + save_data("slice-003", simple_intent, "TFS") + + result = get_all_data() + + assert len(result) == 3 + slice_ids = [slice_data["slice_id"] for slice_data in result] + assert "slice-001" in slice_ids + assert "slice-002" in slice_ids + assert "slice-003" in slice_ids + + def test_get_all_data_json_parsing(self, test_db, sample_intent): + """Test that all returned intents are parsed JSON.""" + with patch('src.database.db.DB_NAME', test_db): + slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] + save_data(slice_id, sample_intent, "TFS") + save_data("slice-002", sample_intent, "IXIA") + + result = get_all_data() + + for slice_data in result: + assert isinstance(slice_data["intent"], dict) + + def test_get_all_data_includes_all_controllers(self, test_db, simple_intent): + """Test that get_all_data includes slices from different controllers.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-tfs", simple_intent, "TFS") + save_data("slice-ixia", simple_intent, "IXIA") + + result = get_all_data() + + controllers = [slice_data["controller"] for slice_data in result] + assert "TFS" in controllers + assert "IXIA" in controllers + + +class TestDeleteAllData: + """Tests for delete_all_data function.""" + + def test_delete_all_data_removes_all_slices(self, test_db, simple_intent): + """Test that delete_all_data removes all slices.""" + with patch('src.database.db.DB_NAME', test_db): + save_data("slice-001", simple_intent, "TFS") + save_data("slice-002", simple_intent, "IXIA") + + delete_all_data() + + result = get_all_data() + assert result == [] + + def test_delete_all_data_empty_database(self, test_db): + """Test delete_all_data on empty database doesn't raise error.""" + with patch('src.database.db.DB_NAME', test_db): + delete_all_data() # Should not 
raise error + result = get_all_data() + assert result == [] + + +class TestStoreData: + """Tests for store_data wrapper function.""" + + def test_store_data_save_new_slice(self, test_db, sample_intent): + """Test store_data saves new slice without slice_id.""" + with patch('src.database.db.DB_NAME', test_db): + store_data(sample_intent, None, "TFS") + + slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] + result = get_data(slice_id) + + assert result["slice_id"] == slice_id + assert result["intent"] == sample_intent + assert result["controller"] == "TFS" + + def test_store_data_update_existing_slice(self, test_db, sample_intent): + """Test store_data updates existing slice when slice_id provided.""" + with patch('src.database.db.DB_NAME', test_db): + slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] + + # Save initial data + save_data(slice_id, sample_intent, "TFS") + + # Update with store_data + updated_intent = sample_intent.copy() + updated_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] = "Updated" + store_data(updated_intent, slice_id, "IXIA") + + result = get_data(slice_id) + assert result["controller"] == "IXIA" + assert result["intent"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["description"] == "Updated" + + def test_store_data_extracts_slice_id_from_intent(self, test_db, sample_intent): + """Test store_data correctly extracts slice_id from intent structure.""" + with patch('src.database.db.DB_NAME', test_db): + store_data(sample_intent, None, "TFS") + + all_data = get_all_data() + assert len(all_data) == 1 + assert all_data[0]["slice_id"] == "slice-service-12345" + + def test_store_data_with_different_controllers(self, test_db, sample_intent): + """Test store_data works with different controller types.""" + with patch('src.database.db.DB_NAME', test_db): + store_data(sample_intent, None, "TFS") + + slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"] + result = get_data(slice_id) + + assert result["controller"] == "TFS" + + +class TestDatabaseIntegration: + """Integration tests for database operations.""" + + def test_full_lifecycle_create_read_update_delete(self, test_db, simple_intent): + """Test complete slice lifecycle.""" + with patch('src.database.db.DB_NAME', test_db): + # Create + save_data("slice-lifecycle", simple_intent, "TFS") + + # Read + result = get_data("slice-lifecycle") + assert result["slice_id"] == "slice-lifecycle" + + # Update + updated_intent = {"bandwidth": "5Gbps", "latency": "2ms", "provider": "opensec"} + update_data("slice-lifecycle", updated_intent, "IXIA") + + result = get_data("slice-lifecycle") + assert result["intent"] == updated_intent + assert result["controller"] == "IXIA" + + # Delete + delete_data("slice-lifecycle") + + with pytest.raises(ValueError): + get_data("slice-lifecycle") + + def test_concurrent_operations(self, test_db, simple_intent): + """Test multiple concurrent database operations.""" + with patch('src.database.db.DB_NAME', test_db): + # Create multiple slices + for i in range(5): + save_data(f"slice-{i}", simple_intent, "TFS" if i % 2 == 0 else "IXIA") + + # Verify all created + all_data = get_all_data() + assert len(all_data) == 5 + + # Update some + updated_intent = {"updated": True} + for i in range(0, 3): + update_data(f"slice-{i}", updated_intent, "TFS") + + # Verify updates + for i in 
range(0, 3):
+                result = get_data(f"slice-{i}")
+                assert result["intent"]["updated"] is True
+
+            # Delete some
+            delete_data("slice-0")
+            delete_data("slice-2")
+
+            all_data = get_all_data()
+            assert len(all_data) == 3
+
+    def test_data_persistence_across_operations(self, test_db, sample_intent):
+        """Test that data persists correctly across multiple operations."""
+        with patch('src.database.db.DB_NAME', test_db):
+            slice_id = sample_intent["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["id"]
+
+            # Save
+            save_data(slice_id, sample_intent, "TFS")
+
+            # Get all and verify
+            all_before = get_all_data()
+            assert len(all_before) == 1
+
+            # Save another
+            save_data("slice-other", sample_intent, "IXIA")
+            all_after = get_all_data()
+            assert len(all_after) == 2
+
+            # Verify first slice still intact
+            first_slice = get_data(slice_id)
+            assert first_slice["intent"] == sample_intent
+            assert first_slice["controller"] == "TFS"
\ No newline at end of file
diff --git a/src/tests/test_e2e.py b/src/tests/test_e2e.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb91405568b415b4ef7b5148354fc825e3372a4
--- /dev/null
+++ b/src/tests/test_e2e.py
@@ -0,0 +1,101 @@
+import pytest
+import json
+from pathlib import Path
+from itertools import product
+from src.api.main import Api
+from src.main import NSController
+from app import create_app
+
+# Folder containing the request JSON files
+REQUESTS_DIR = Path(__file__).parent / "requests"
+
+# List of all the boolean flags to test
+FLAGS_TO_TEST = ["WEBUI_DEPLOY", "DUMP_TEMPLATES", "PLANNER_ENABLED", "PCE_EXTERNAL", "NRP_ENABLED"]
+
+# Possible values for PLANNER_TYPE
+PLANNER_TYPE_VALUES = ["ENERGY", "HRAT", "TFS_OPTICAL"]
+
+
+@pytest.fixture
+def app(temp_sqlite_db):
+    """Create the Flask app with the default configuration."""
+    app = create_app()
+    return app
+
+@pytest.fixture
+def client(app):
+    """Flask test client for making requests."""
+    return app.test_client()
+
+@pytest.fixture
+def set_flags(app):
+    """Set the flags directly in app.config."""
+    def _set(flags: dict):
+        for k, v in flags.items():
+            app.config[k] = v
+    return _set
+
+@pytest.fixture
+def temp_sqlite_db(monkeypatch, tmp_path):
+    """Use a temporary SQLite database during the tests."""
+    temp_db_path = tmp_path / "test_slice.db"
+    monkeypatch.setattr("src.database.db.DB_NAME", str(temp_db_path))
+
+    # Initialize the temporary database
+    from src.database.db import init_db
+    init_db()
+
+    yield temp_db_path
+
+    # Cleanup when finished
+    if temp_db_path.exists():
+        temp_db_path.unlink()
+
+# Load all the request JSON files
+def load_request_files():
+    test_cases = []
+    for f in REQUESTS_DIR.glob("*.json"):
+        with open(f, "r") as file:
+            json_data = json.load(file)
+            test_cases.append(json_data)
+    return test_cases
+
+# Generator of all flag combinations
+def generate_flag_combinations():
+    bool_values = [True, False]
+    for combo in product(bool_values, repeat=len(FLAGS_TO_TEST)):
+        bool_flags = dict(zip(FLAGS_TO_TEST, combo))
+        for planner_type in PLANNER_TYPE_VALUES:
+            yield {**bool_flags, "PLANNER_TYPE": planner_type}
+
+
+# Combine each request with every flag combination
+def generate_test_cases():
+    requests = load_request_files()
+    for json_data in requests:
+        for flags in generate_flag_combinations():
+            expected_codes = [200, 201]
+            yield (json_data, flags, expected_codes)
+
+@pytest.mark.parametrize(
+    "json_data, flags, expected_codes",
+    list(generate_test_cases())
+)
+def test_add_and_delete_flow(app, json_data, flags, expected_codes, set_flags, temp_sqlite_db):
+    with app.app_context():
+        set_flags(flags)
+
+        controller = NSController(controller_type="TFS")
+        api = Api(controller)
+
+        # Add the flow
+        data, code = api.add_flow(json_data)
+        assert code in expected_codes, f"Failing flags: {flags}"
+
+        # Delete the flow if it was created
+        if code == 201 and isinstance(data, dict) and "slice_id" in data:
+            slice_id = data["slice_id"]
+            _, delete_code = api.delete_flows(slice_id=slice_id)
+            assert delete_code == 204, f"Could not delete slice {slice_id}"
+
+
diff --git a/src/tests/test_initialization.py b/src/tests/test_initialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..c51cc0659a4d409781c1cdebf7b0512f158d38e6
--- /dev/null
+++ b/src/tests/test_initialization.py
@@ -0,0 +1,37 @@
+import pytest
+
+# Import the class under test (adjust the module name if it differs)
+from src.main import NSController
+
+def test_init_default_values():
+    """Test that default initialization sets expected values."""
+    controller = NSController()
+
+    # Configurable attribute
+    assert controller.controller_type == "TFS"
+
+    # Internal attributes
+    assert controller.path == ""
+    assert controller.response == []
+    assert controller.start_time == 0
+    assert controller.end_time == 0
+    assert controller.setup_time == 0
+
+@pytest.mark.parametrize("controller_type", ["TFS", "IXIA", "custom"])
+def test_init_controller_type(controller_type):
+    """Test initialization with different controller types."""
+    controller = NSController(controller_type=controller_type)
+    assert controller.controller_type == controller_type
+
+def test_init_independence_between_instances():
+    """Test that each instance has independent state (mutable attrs)."""
+    c1 = NSController()
+    c2 = NSController()
+
+    # Modify a list on one instance
+    c1.response.append("test-response")
+
+    # The other instance should not be affected
+    assert c2.response == []
+    assert c1.response == ["test-response"]
+
diff --git a/src/tests/test_mapper.py b/src/tests/test_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..219923f0bf70e6920398e945a14205dd913baab9
--- /dev/null
+++ b/src/tests/test_mapper.py
@@ -0,0 +1,639 @@
+import pytest
+import logging
+from unittest.mock import patch, MagicMock, call
+from flask import Flask
+from src.mapper.main import mapper
+from src.mapper.slo_viability import slo_viability
+
+
+@pytest.fixture
+def sample_ietf_intent():
+    """Fixture providing sample IETF network slice intent."""
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slice-service": [{
+                "id": "slice-service-12345",
+                "description": "Test network slice",
+                "service-tags": {"tag-type": {"value": "L2VPN"}}
+            }],
+            "slo-sle-templates": {
+                "slo-sle-template": [{
+                    "id": "profile1",
+                    "slo-policy": {
+                        "metric-bound": [
+                            {
+                                "metric-type": "one-way-bandwidth",
+                                "metric-unit": "kbps",
+                                "bound": 1000
+                            },
+                            {
+                                "metric-type": "one-way-delay-maximum",
+                                "metric-unit": "milliseconds",
+                                "bound": 10
+                            }
+                        ]
+                    }
+                }]
+            }
+        }
+    }
+
+
+@pytest.fixture
+def sample_nrp_view():
+    """Fixture providing sample NRP view."""
+    return [
+        {
+            "id": "nrp-1",
+            "available": True,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 1500
+                },
+                {
+                    "metric-type": "one-way-delay-maximum",
+                    "bound": 8
+                }
+            ]
+        },
+        {
+            "id": "nrp-2",
+            "available": True,
+            "slices": [],
+            "slos": [
+                {
+                    "metric-type": "one-way-bandwidth",
+                    "bound": 500
+                },
+ { + "metric-type": "one-way-delay-maximum", + "bound": 15 + } + ] + }, + { + "id": "nrp-3", + "available": False, + "slices": [], + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 2000 + }, + { + "metric-type": "one-way-delay-maximum", + "bound": 5 + } + ] + } + ] + + +@pytest.fixture +def mock_app(): + """Fixture providing mock Flask app context.""" + app = Flask(__name__) + app.config = { + "NRP_ENABLED": False, + "PLANNER_ENABLED": False, + "SERVER_NAME": "localhost", + "APPLICATION_ROOT": "/", + "PREFERRED_URL_SCHEME": "http" + } + return app + + +@pytest.fixture +def app_context(mock_app): + """Fixture providing Flask application context.""" + with mock_app.app_context(): + yield mock_app + + +class TestSloViability: + """Tests for slo_viability function.""" + + def test_slo_viability_meets_all_requirements(self): + """Test when NRP meets all SLO requirements.""" + slice_slos = [ + { + "metric-type": "one-way-bandwidth", + "bound": 1000 + }, + { + "metric-type": "one-way-delay-maximum", + "bound": 10 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 1500 + }, + { + "metric-type": "one-way-delay-maximum", + "bound": 8 + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is True + assert score > 0 + + def test_slo_viability_fails_bandwidth_minimum(self): + """Test when NRP doesn't meet minimum bandwidth requirement.""" + slice_slos = [ + { + "metric-type": "one-way-bandwidth", + "bound": 1000 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 500 # Less than required + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is False + assert score == 0 + + def test_slo_viability_fails_delay_maximum(self): + """Test when NRP doesn't meet maximum delay requirement.""" + slice_slos = [ + { + "metric-type": "one-way-delay-maximum", + "bound": 10 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-delay-maximum", + "bound": 15 # Greater than maximum allowed + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is False + assert score == 0 + + def test_slo_viability_multiple_metrics_partial_failure(self): + """Test when one metric fails in a multi-metric comparison.""" + slice_slos = [ + { + "metric-type": "one-way-bandwidth", + "bound": 1000 + }, + { + "metric-type": "one-way-delay-maximum", + "bound": 10 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 1500 # OK + }, + { + "metric-type": "one-way-delay-maximum", + "bound": 15 # NOT OK + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is False + assert score == 0 + + def test_slo_viability_flexibility_score_calculation(self): + """Test flexibility score calculation.""" + slice_slos = [ + { + "metric-type": "one-way-bandwidth", + "bound": 1000 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 2000 # 100% better than requirement + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is True + # Flexibility = (2000 - 1000) / 1000 = 1.0 + assert score == 1.0 + + def test_slo_viability_empty_slos(self): + """Test with empty SLO list.""" + slice_slos = [] + nrp_slos = {"slos": []} + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is True + assert score == 0 + + def test_slo_viability_no_matching_metrics(self): + """Test when there are no matching metric types.""" + slice_slos = 
[ + { + "metric-type": "one-way-bandwidth", + "bound": 1000 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "two-way-bandwidth", + "bound": 1500 + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + # Should still return True as no metrics failed + assert viable is True + assert score == 0 + + def test_slo_viability_packet_loss_maximum_type(self): + """Test packet loss as maximum constraint type.""" + slice_slos = [ + { + "metric-type": "one-way-packet-loss", + "bound": 0.01 # 1% maximum acceptable + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-packet-loss", + "bound": 0.005 # 0.5% NRP loss + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is True + assert score > 0 + + +class TestMapper: + """Tests for mapper function.""" + + def test_mapper_with_nrp_disabled_and_planner_disabled(self, app_context, sample_ietf_intent): + """Test mapper when both NRP and Planner are disabled.""" + app_context.config = { + "NRP_ENABLED": False, + "PLANNER_ENABLED": False + } + + result = mapper(sample_ietf_intent) + + assert result is None + + @patch('src.mapper.main.Planner') + def test_mapper_with_planner_enabled(self, mock_planner_class, app_context, sample_ietf_intent): + """Test mapper when Planner is enabled.""" + app_context.config = { + "NRP_ENABLED": False, + "PLANNER_ENABLED": True, + "PLANNER_TYPE":"ENERGY" + } + + mock_planner_instance = MagicMock() + mock_planner_instance.planner.return_value = {"path": "node1->node2->node3"} + mock_planner_class.return_value = mock_planner_instance + + result = mapper(sample_ietf_intent) + + assert result == {"path": "node1->node2->node3"} + mock_planner_instance.planner.assert_called_once_with(sample_ietf_intent, "ENERGY") + + @patch('src.mapper.main.realizer') + def test_mapper_with_nrp_enabled_finds_best_nrp(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view): + """Test mapper with NRP enabled finds the best NRP.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False, + } + + mock_realizer.return_value = sample_nrp_view + + result = mapper(sample_ietf_intent) + + # Verify realizer was called to READ NRP view + assert mock_realizer.call_args_list[0] == call(None, True, "READ") + assert result is None + + @patch('src.mapper.main.realizer') + def test_mapper_with_nrp_enabled_no_viable_candidates(self, mock_realizer, app_context, sample_ietf_intent): + """Test mapper when no viable NRPs are found.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + # All NRPs are unavailable + nrp_view = [ + { + "id": "nrp-1", + "available": False, + "slices": [], + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 500 + } + ] + } + ] + + mock_realizer.return_value = nrp_view + + result = mapper(sample_ietf_intent) + + assert result is None + + @patch('src.mapper.main.realizer') + def test_mapper_with_nrp_enabled_creates_new_nrp(self, mock_realizer, app_context, sample_ietf_intent): + """Test mapper creates new NRP when no suitable candidate exists.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + # No viable NRPs + nrp_view = [] + + mock_realizer.side_effect = [nrp_view, None] # First call returns empty, second for CREATE + + result = mapper(sample_ietf_intent) + + # Verify CREATE was called + create_call = [c for c in mock_realizer.call_args_list if len(c[0]) > 2 and c[0][2] == "CREATE"] + assert len(create_call) > 0 + + @patch('src.mapper.main.realizer') + def 
test_mapper_with_nrp_and_planner_both_enabled(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view): + """Test mapper when both NRP and Planner are enabled.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": True, + "PLANNER_TYPE":"ENERGY" + } + + mock_realizer.return_value = sample_nrp_view + + with patch('src.mapper.main.Planner') as mock_planner_class: + mock_planner_instance = MagicMock() + mock_planner_instance.planner.return_value = {"path": "optimized_path"} + mock_planner_class.return_value = mock_planner_instance + + result = mapper(sample_ietf_intent) + + # Planner should be called and return the result + assert result == {"path": "optimized_path"} + + @patch('src.mapper.main.realizer') + def test_mapper_updates_best_nrp_with_slice(self, mock_realizer, app_context, sample_ietf_intent, sample_nrp_view): + """Test mapper updates best NRP with new slice.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + mock_realizer.return_value = sample_nrp_view + + result = mapper(sample_ietf_intent) + + # Verify UPDATE was called + update_calls = [c for c in mock_realizer.call_args_list if len(c[0]) > 2 and c[0][2] == "UPDATE"] + assert len(update_calls) > 0 + + @patch('src.mapper.main.realizer') + def test_mapper_extracts_slos_correctly(self, mock_realizer, app_context, sample_ietf_intent): + """Test that mapper correctly extracts SLOs from intent.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + mock_realizer.return_value = [] + + mapper(sample_ietf_intent) + + # Verify the function processed the intent + assert mock_realizer.called + + @patch('src.mapper.main.logging') + def test_mapper_logs_debug_info(self, mock_logging, app_context, sample_ietf_intent, sample_nrp_view): + """Test mapper logs debug information.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + with patch('src.mapper.main.realizer') as mock_realizer: + mock_realizer.return_value = sample_nrp_view + + mapper(sample_ietf_intent) + + # Verify debug logging was called + assert mock_logging.debug.called + + +class TestMapperIntegration: + """Integration tests for mapper functionality.""" + + def test_mapper_complete_nrp_workflow(self, app_context, sample_ietf_intent, sample_nrp_view): + """Test complete NRP mapping workflow.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + with patch('src.mapper.main.realizer') as mock_realizer: + mock_realizer.return_value = sample_nrp_view + + result = mapper(sample_ietf_intent) + + # Verify the workflow sequence + assert mock_realizer.call_count >= 1 + first_call = mock_realizer.call_args_list[0] + assert first_call[0][1] is True # need_nrp parameter + assert first_call[0][2] == "READ" # READ operation + + def test_mapper_complete_planner_workflow(self, app_context, sample_ietf_intent): + """Test complete Planner workflow.""" + app_context.config = { + "NRP_ENABLED": False, + "PLANNER_ENABLED": True, + "PLANNER_TYPE":"ENERGY" + } + + expected_path = { + "path": "node1->node2->node3", + "cost": 10, + "latency": 5 + } + + with patch('src.mapper.main.Planner') as mock_planner_class: + mock_planner_instance = MagicMock() + mock_planner_instance.planner.return_value = expected_path + mock_planner_class.return_value = mock_planner_instance + + result = mapper(sample_ietf_intent) + + assert result == expected_path + mock_planner_instance.planner.assert_called_once() + + def test_mapper_with_invalid_nrp_response(self, 
app_context, sample_ietf_intent): + """Test mapper behavior with invalid NRP response.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + # Invalid NRP without expected fields + invalid_nrp = { + "id": "nrp-invalid" + # Missing 'available' and 'slos' fields + } + + with patch('src.mapper.main.realizer') as mock_realizer: + mock_realizer.return_value = [invalid_nrp] + + # Should handle gracefully + try: + result = mapper(sample_ietf_intent) + except (KeyError, TypeError): + # Expected to fail gracefully + pass + + def test_mapper_with_missing_slos_in_intent(self, app_context): + """Test mapper behavior when intent has no SLOs.""" + app_context.config = { + "NRP_ENABLED": True, + "PLANNER_ENABLED": False + } + + invalid_intent = { + "ietf-network-slice-service:network-slice-services": { + "slice-service": [{ + "id": "slice-1" + }], + "slo-sle-templates": { + "slo-sle-template": [{ + "id": "profile1", + "slo-policy": { + # No metric-bound key + } + }] + } + } + } + + try: + mapper(invalid_intent) + except (KeyError, TypeError): + # Expected behavior + pass + + +class TestSloViabilityEdgeCases: + """Edge case tests for slo_viability function.""" + + def test_slo_viability_with_zero_bound(self): + """Test handling of zero bounds in SLO.""" + slice_slos = [ + { + "metric-type": "one-way-bandwidth", + "bound": 0 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 100 + } + ] + } + + # Should handle zero division gracefully or fail as expected + try: + viable, score = slo_viability(slice_slos, nrp_slos) + except (ZeroDivisionError, ValueError): + pass + + def test_slo_viability_with_very_large_bounds(self): + """Test handling of very large SLO bounds.""" + slice_slos = [ + { + "metric-type": "one-way-bandwidth", + "bound": 1e10 + } + ] + + nrp_slos = { + "slos": [ + { + "metric-type": "one-way-bandwidth", + "bound": 2e10 + } + ] + } + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is True + assert isinstance(score, (int, float)) + + def test_slo_viability_all_delay_types(self): + """Test handling of all delay metric t ypes.""" + delay_types = [ + "one-way-delay-maximum", + "two-way-delay-maximum", + "one-way-delay-percentile", + "two-way-delay-percentile", + "one-way-delay-variation-maximum", + "two-way-delay-variation-maximum" + ] + + for delay_type in delay_types: + slice_slos = [{"metric-type": delay_type, "bound": 10}] + nrp_slos = {"slos": [{"metric-type": delay_type, "bound": 8}]} + + viable, score = slo_viability(slice_slos, nrp_slos) + + assert viable is True + assert score >= 0 \ No newline at end of file diff --git a/src/tests/test_nbi_processor.py b/src/tests/test_nbi_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..ef1349425fba9d6756a4b9af122ba58b4a5ff41b --- /dev/null +++ b/src/tests/test_nbi_processor.py @@ -0,0 +1,222 @@ +import pytest +from unittest.mock import patch +from src.nbi_processor.detect_format import detect_format +from src.nbi_processor.main import nbi_processor +from src.nbi_processor.translator import translator + + +# ---------- Tests detect_format ---------- + +def test_detect_format_ietf(): + data = {"ietf-network-slice-service:network-slice-services": {}} + assert detect_format(data) == "IETF" + +def test_detect_format_3gpp_variants(): + assert detect_format({"RANSliceSubnet1": {}}) == "3GPP" + assert detect_format({"NetworkSlice1": {}}) == "3GPP" + assert detect_format({"TopSliceSubnet1": {}}) == "3GPP" + assert 
detect_format({"CNSliceSubnet1": {}}) == "3GPP" + +def test_detect_format_none(): + assert detect_format({"foo": "bar"}) is None + + +# ---------- Fixtures ---------- + +@pytest.fixture +def ietf_intent(): + return {"ietf-network-slice-service:network-slice-services": {"foo": "bar"}} + +@pytest.fixture +def gpp_intent(): + # Estructura mínima consistente con translator + return { + "RANSliceSubnet1": { + "networkSliceSubnetRef": ["subnetA", "subnetB"] + }, + "subnetA": { + "EpTransport": ["EpTransport ep1", "EpTransport ep2"], + "SliceProfileList": [{ + "RANSliceSubnetProfile": { + "dLThptPerSliceSubnet": { + "GuaThpt": 1, + "MaxThpt": 2 + }, + "uLThptPerSliceSubnet": { + "GuaThpt": 1, + "MaxThpt": 2 + }, + "dLLatency": 20, + "uLLatency": 20 + } + }], + }, + "subnetB": { + "EpTransport": ["EpTransport ep3", "EpTransport ep4"], + }, + "EpTransport ep1": { + "qosProfile": "qosA", + "EpApplicationRef": ["EP_N2 epRef1"], + "logicalInterfaceInfo": {"logicalInterfaceType": "typeA", "logicalInterfaceId": "idA"}, + "IpAddress": "1.1.1.1", + "NextHopInfo": "NH1", + }, + "EpTransport ep2": { + "qosProfile": "qosB", + "EpApplicationRef": ["EP_N2 epRef2"], + "logicalInterfaceInfo": {"logicalInterfaceType": "typeB", "logicalInterfaceId": "idB"}, + "IpAddress": "2.2.2.2", + "NextHopInfo": "NH2", + }, + "EP_N2 epRef1": {"localAddress": "10.0.0.1", "remoteAddress": "11.1.1.1", "epTransportRef": "ep1"}, + "EP_N2 epRef2": {"localAddress": "10.0.0.2", "remoteAddress": "11.1.1.2", "epTransportRef": "ep2"}, + "EpTransport ep3": {"qosProfile": "qosC", "EpApplicationRef": ["EP_N2 epRef3"], "logicalInterfaceInfo": {"logicalInterfaceType": "typeC", "logicalInterfaceId": "idC"}, "IpAddress": "3.3.3.3", "NextHopInfo": "NH3"}, + "EpTransport ep4": {"qosProfile": "qosD", "EpApplicationRef": ["EP_N2 epRef4"], "logicalInterfaceInfo": {"logicalInterfaceType": "typeD", "logicalInterfaceId": "idD"}, "IpAddress": "4.4.4.4", "NextHopInfo": "NH4"}, + "EP_N2 epRef3": {"localAddress": "10.0.0.3", "remoteAddress": "11.1.1.3", "epTransportRef": "ep3"}, + "EP_N2 epRef4": {"localAddress": "10.0.0.4", "remoteAddress": "11.1.1.4", "epTransportRef": "ep4"}, + } + + +@pytest.fixture +def fake_template(): + # Plantilla mínima para que el traductor funcione + return { + "ietf-network-slice-service:network-slice-services": { + "slo-sle-templates": { + "slo-sle-template": [ + {"id": "", "slo-policy": {"metric-bound": []}} + ] + }, + "slice-service": [ + { + "id": "", + "description": "", + "slo-sle-policy": {}, + "sdps": {"sdp": [ + {"service-match-criteria": {"match-criterion": [{}]}, "attachment-circuits": {"attachment-circuit": [{"sdp-peering": {}}]}}, + {"service-match-criteria": {"match-criterion": [{}]}, "attachment-circuits": {"attachment-circuit": [{"sdp-peering": {}}]}} + ]}, + "connection-groups": {"connection-group": [{}]}, + } + ], + } + } + + +# ---------- Tests nbi_processor ---------- + +def test_nbi_processor_ietf(ietf_intent): + result = nbi_processor(ietf_intent) + assert isinstance(result, list) + assert result[0] == ietf_intent + +@patch("src.nbi_processor.main.translator") +def test_nbi_processor_3gpp(mock_translator, gpp_intent): + mock_translator.return_value = {"ietf-network-slice-service:network-slice-services": {}} + result = nbi_processor(gpp_intent) + assert isinstance(result, list) + assert len(result) == 2 # Dos subnets procesados + assert all("ietf-network-slice-service:network-slice-services" in r for r in result) + +def test_nbi_processor_unrecognized(): + with pytest.raises(ValueError): + 
nbi_processor({"foo": "bar"}) + +def test_nbi_processor_empty(): + with pytest.raises(ValueError): + nbi_processor({}) + + +# ---------- Tests translator ---------- + +@patch("src.nbi_processor.translator.load_template") +def test_translator_basic(mock_load_template, gpp_intent, fake_template): + mock_load_template.return_value = fake_template + result = translator(gpp_intent, "subnetA") + + assert isinstance(result, dict) + assert "ietf-network-slice-service:network-slice-services" in result + + slice_service = result["ietf-network-slice-service:network-slice-services"]["slice-service"][0] + assert slice_service["id"].startswith("slice-service-") + assert "description" in slice_service + assert slice_service["slo-sle-policy"]["slo-sle-template"] == "qosA" # viene del ep1 + +import re +import uuid + + +# ---------- Extra detect_format ---------- + +@pytest.mark.parametrize("data", [ + None, + [], + "", + 123, +]) +def test_detect_format_invalid_types(data): + assert detect_format(data if isinstance(data, dict) else {}) in (None, "IETF", "3GPP") + + +def test_detect_format_multiple_keys(): + # Si tiene IETF y 3GPP, debe priorizar IETF + data = { + "ietf-network-slice-service:network-slice-services": {}, + "RANSliceSubnet1": {} + } + assert detect_format(data) == "IETF" + + +# ---------- Extra nbi_processor ---------- + +def test_nbi_processor_gpp_missing_refs(gpp_intent): + # Quitar networkSliceSubnetRef debería provocar ValueError en translator loop + broken = gpp_intent.copy() + broken["RANSliceSubnet1"] = {} # no tiene "networkSliceSubnetRef" + with pytest.raises(KeyError): + nbi_processor(broken) + + +# ---------- Extra translator ---------- + +@patch("src.nbi_processor.translator.load_template") +def test_translator_maps_metrics(mock_load_template, gpp_intent, fake_template): + mock_load_template.return_value = fake_template + result = translator(gpp_intent, "subnetA") + + metrics = result["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"] + metric_types = {m["metric-type"] for m in metrics} + assert "one-way-delay-maximum" in metric_types + assert "one-way-bandwidth" in metric_types + + +@patch("src.nbi_processor.translator.load_template") +def test_translator_empty_profile(mock_load_template, gpp_intent, fake_template): + mock_load_template.return_value = fake_template + gpp_intent["subnetA"]["SliceProfileList"] = [{}] # vacío + result = translator(gpp_intent, "subnetA") + metrics = result["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"]["metric-bound"] + assert metrics == [] # no debería añadir nada + +@patch("src.nbi_processor.translator.load_template") +def test_translator_sdps_are_populated(mock_load_template, gpp_intent, fake_template): + mock_load_template.return_value = fake_template + result = translator(gpp_intent, "subnetA") + slice_service = result["ietf-network-slice-service:network-slice-services"]["slice-service"][0] + + sdp0 = slice_service["sdps"]["sdp"][0] + assert sdp0["node-id"] == "ep1" + assert re.match(r"^\d+\.\d+\.\d+\.\d+$", sdp0["attachment-circuits"]["attachment-circuit"][0]["ac-ipv4-address"]) + assert "target-connection-group-id" in sdp0["service-match-criteria"]["match-criterion"][0] + + sdp1 = slice_service["sdps"]["sdp"][1] + assert sdp1["node-id"] == "ep2" + assert sdp1["attachment-circuits"]["attachment-circuit"][0]["sdp-peering"]["peer-sap-id"].startswith("NH") + + 
+@patch("src.nbi_processor.translator.load_template") +def test_translator_with_single_endpoint_should_fail(mock_load_template, gpp_intent, fake_template): + mock_load_template.return_value = fake_template + gpp_intent["subnetA"]["EpTransport"] = ["EpTransport ep1"] # solo uno + with pytest.raises(IndexError): + translator(gpp_intent, "subnetA") diff --git a/src/tests/test_utils.py b/src/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7b887a497180777fb9fdd86ca5f04d57c5b4c77a --- /dev/null +++ b/src/tests/test_utils.py @@ -0,0 +1,182 @@ +import json +import pytest +import os + +from src.utils.load_template import load_template +from src.utils.dump_templates import dump_templates +from src.utils.send_response import send_response +from src.utils.build_response import build_response +from flask import Flask + +@pytest.fixture +def tmp_json_file(tmp_path): + """Crea un archivo JSON temporal válido y devuelve su ruta y contenido.""" + data = {"name": "test"} + file_path = tmp_path / "template.json" + file_path.write_text(json.dumps(data)) + return file_path, data + + +def test_load_template_ok(tmp_json_file): + """Debe cargar correctamente un JSON válido.""" + file_path, expected = tmp_json_file + result = load_template(str(file_path)) + assert result == expected + + +def test_load_template_invalid(tmp_path): + """Debe devolver un response con error si el JSON es inválido.""" + bad_file = tmp_path / "bad.json" + bad_file.write_text("{invalid json}") + + result, code = load_template(str(bad_file)) + assert code == 500 + assert result["success"] is False + assert "Template loading error" in result["error"] + +def test_dump_templates_enabled(monkeypatch, tmp_path): + """Debe volcar múltiples JSON correctamente en src/templates si DUMP_TEMPLATES está activado.""" + templates_dir = tmp_path / "src" / "templates" + templates_dir.mkdir(parents=True) + + monkeypatch.setattr("src.utils.dump_templates.TEMPLATES_PATH", str(templates_dir)) + + app = Flask(__name__) + app.config["DUMP_TEMPLATES"] = True + + with app.app_context(): + nbi = {"nbi": 1} + ietf = {"ietf": 2} + realizer = {"realizer": 3} + + dump_templates(nbi, ietf, realizer) + + for name, data in [("nbi_template.json", nbi), ("ietf_template.json", ietf), ("realizer_template.json", realizer)]: + file_path = templates_dir / name + assert file_path.exists() + assert json.loads(file_path.read_text()) == data + +def test_dump_templates_disabled(monkeypatch, tmp_path): + """No debe escribir nada en src/templates si DUMP_TEMPLATES está desactivado.""" + templates_dir = tmp_path / "src" / "templates" + templates_dir.mkdir(parents=True) + + monkeypatch.setattr("src.utils.dump_templates.TEMPLATES_PATH", str(templates_dir)) + + app = Flask(__name__) + app.config["DUMP_TEMPLATES"] = False + + with app.app_context(): + dump_templates({"nbi": 1}, {"ietf": 2}, {"realizer": 3}) + + for name in ["nbi_template.json", "ietf_template.json", "realizer_template.json"]: + assert not (templates_dir / name).exists() + +def test_send_response_success(): + """Debe devolver success=True y code=200 si el resultado es True.""" + resp, code = send_response(True, data={"k": "v"}) + assert code == 200 + assert resp["success"] is True + assert resp["data"]["k"] == "v" + assert resp["error"] is None + + +def test_send_response_error(): + """Debe devolver success=False y code=400 si el resultado es False.""" + resp, code = send_response(False, message="fallo") + assert code == 400 + assert resp["success"] is False + assert resp["data"] is 
+    assert "fallo" in resp["error"]
+
+def ietf_intent():
+    """A valid intent in simplified IETF format."""
+    return {
+        "ietf-network-slice-service:network-slice-services": {
+            "slo-sle-templates": {
+                "slo-sle-template": [
+                    {
+                        "id": "qos1",
+                        "slo-policy": {
+                            "metric-bound": [
+                                {
+                                    "metric-type": "one-way-bandwidth",
+                                    "metric-unit": "kbps",
+                                    "bound": 1000
+                                }
+                            ],
+                            "availability": 99.9,
+                            "mtu": 1500
+                        }
+                    }
+                ]
+            },
+            "slice-service": [
+                {
+                    "id": "slice-test-1",
+                    "sdps": {
+                        "sdp": [
+                            {
+                                "id": "CU",
+                                "sdp-ip-address": "10.0.0.1",
+                                "service-match-criteria": {
+                                    "match-criterion": [{"match-type": "vlan", "value": "100"}]
+                                },
+                            },
+                            {
+                                "id": "DU",
+                                "sdp-ip-address": "10.0.0.2",
+                                "service-match-criteria": {
+                                    "match-criterion": [{"match-type": "vlan", "value": "100"}]
+                                },
+                            },
+                        ]
+                    },
+                }
+            ],
+        }
+    }
+
+
+def test_build_response_ok():
+    """Should correctly build the response from a valid IETF intent."""
+    intent = ietf_intent()
+    response = []
+    result = build_response(intent, response)
+
+    assert isinstance(result, list)
+    assert len(result) == 1
+
+    slice_data = result[0]
+    assert slice_data["id"] == "slice-test-1"
+    assert slice_data["source"] == "CU"
+    assert slice_data["destination"] == "DU"
+    assert slice_data["vlan"] == "100"
+
+    # Validate constraints
+    requirements = slice_data["requirements"]
+    assert any(r["constraint_type"] == "one-way-bandwidth[kbps]" and r["constraint_value"] == "1000" for r in requirements)
+    assert any(r["constraint_type"] == "availability[%]" and r["constraint_value"] == "99.9" for r in requirements)
+    assert any(r["constraint_type"] == "mtu[bytes]" and r["constraint_value"] == "1500" for r in requirements)
+
+
+def test_build_response_empty_policy():
+    """Should return a list with no constraints when slo-policy is empty."""
+    intent = ietf_intent()
+    intent["ietf-network-slice-service:network-slice-services"]["slo-sle-templates"]["slo-sle-template"][0]["slo-policy"] = {}
+    response = []
+    result = build_response(intent, response)
+
+    assert isinstance(result, list)
+    assert len(result[0]["requirements"]) == 0
+
+
+def test_build_response_invalid_intent():
+    """Should fail cleanly if the intent does not have the expected structure."""
+    bad_intent = {}
+    response = []
+    try:
+        result = build_response(bad_intent, response)
+    except Exception:
+        result = []
+    assert result == []