Skip to content
......@@ -21,7 +21,7 @@ build l3_attackmitigator:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......
......@@ -21,7 +21,7 @@ build l3_centralizedattackdetector:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......
......@@ -21,7 +21,7 @@ build l3_distributedattackdetector:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......
......@@ -21,7 +21,7 @@ build load_generator:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......
......@@ -21,7 +21,7 @@ build monitoring:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......
......@@ -38,7 +38,6 @@ from common.tools.service.GenericGrpcService import GenericGrpcService
from common.tools.timestamp.Converters import timestamp_utcnow_to_float #, timestamp_string_to_float
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from device.service.DeviceService import DeviceService
from device.service.driver_api.DriverFactory import DriverFactory
from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
from monitoring.client.MonitoringClient import MonitoringClient
......@@ -54,6 +53,7 @@ from monitoring.tests.Messages import create_kpi_request, create_kpi_request_d,
from monitoring.tests.Objects import DEVICE_DEV1, DEVICE_DEV1_CONNECT_RULES, DEVICE_DEV1_UUID, ENDPOINT_END1_UUID
os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
from device.service.DeviceService import DeviceService # pylint: disable=wrong-import-position,ungrouped-imports
from device.service.drivers import DRIVERS # pylint: disable=wrong-import-position,ungrouped-imports
......
......@@ -21,7 +21,7 @@ build nbi:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import logging
from typing import Dict
from typing import Dict, List
from flask import request
from flask.json import jsonify
from flask_restful import Resource
......@@ -36,11 +36,40 @@ class L3VPN_Services(Resource):
request_data : Dict = request.json
LOGGER.debug('Request: {:s}'.format(str(request_data)))
errors = list()
if 'ietf-l3vpn-svc:l3vpn-services' in request_data:
# processing multiple L3VPN service requests formatted as:
#{
# "ietf-l3vpn-svc:l3vpn-services": {
# "l3vpn-svc": [
# {
# "service-id": "vpn1",
# "vpn-services": {
# "vpn-service": [
for l3vpn_svc in request_data['ietf-l3vpn-svc:l3vpn-services']['l3vpn-svc']:
l3vpn_svc.pop('service-id', None)
l3vpn_svc_request_data = {'ietf-l3vpn-svc:l3vpn-svc': l3vpn_svc}
errors.extend(self._process_l3vpn(l3vpn_svc_request_data))
elif 'ietf-l3vpn-svc:l3vpn-svc' in request_data:
# processing single (standard) L3VPN service request formatted as:
#{
# "ietf-l3vpn-svc:l3vpn-svc": {
# "vpn-services": {
# "vpn-service": [
errors.extend(self._process_l3vpn(request_data))
else:
errors.append('unexpected request: {:s}'.format(str(request_data)))
response = jsonify(errors)
response.status_code = HTTP_CREATED if len(errors) == 0 else HTTP_SERVERERROR
return response
def _process_l3vpn(self, request_data : Dict) -> List[Dict]:
yang_validator = YangValidator('ietf-l3vpn-svc')
request_data = yang_validator.parse_to_dict(request_data)
yang_validator.destroy()
errors = []
errors = list()
for vpn_service in request_data['l3vpn-svc']['vpn-services']['vpn-service']:
process_vpn_service(vpn_service, errors)
......@@ -48,6 +77,4 @@ class L3VPN_Services(Resource):
for site in request_data['l3vpn-svc']['sites']['site']:
process_site(site, errors)
response = jsonify(errors)
response.status_code = HTTP_CREATED if len(errors) == 0 else HTTP_SERVERERROR
return response
return errors
......@@ -21,7 +21,7 @@ build opticalattackdetector:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......@@ -37,62 +37,68 @@ build opticalattackdetector:
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
# apply unit test to the opticalattackdetector component
unit_test opticalattackdetector:
variables:
IMAGE_NAME: 'opticalattackdetector' # name of the microservice
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: unit_test
needs:
- build opticalattackdetector
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
- if docker container ls | grep redis; then docker rm -f redis; else echo "redis image is not in the system"; fi
- if docker container ls | grep dbscanserving; then docker rm -f dbscanserving; else echo "dbscanserving image is not in the system"; fi
script:
- export REDIS_PASSWORD=$(uuidgen)
- docker pull "redis:7.0-alpine"
- docker run --name redis -d --network=teraflowbridge -p 16379:6379 -e REDIS_PASSWORD=${REDIS_PASSWORD} --rm redis:7.0-alpine redis-server --requirepass ${REDIS_PASSWORD}
- docker logs redis
- REDIS_ADDRESS=$(docker inspect redis --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- docker pull "$CI_REGISTRY_IMAGE/dbscanserving:$IMAGE_TAG"
- docker run --name dbscanserving -d -p 10008:10008 --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/dbscanserving:$IMAGE_TAG "python -m dbscanserving.service"
- docker logs dbscanserving
- DBSCANSERVING_ADDRESS=$(docker inspect dbscanserving --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- >
docker run --name $IMAGE_NAME -d -p 10006:10006
-v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results"
-e REDIS_PASSWORD=${REDIS_PASSWORD}
-e DBSCANSERVINGSERVICE_SERVICE_HOST=${DBSCANSERVING_ADDRESS}
-e CACHINGSERVICE_SERVICE_HOST=${REDIS_ADDRESS}
--network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/${IMAGE_NAME}/results/${IMAGE_NAME}_report.xml; coverage xml -o /home/${IMAGE_NAME}/results/${IMAGE_NAME}_coverage.xml; coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
- docker rm -f redis
- docker rm -f dbscanserving
- docker network rm teraflowbridge
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- src/$IMAGE_NAME/tests/Dockerfile
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
## apply unit test to the opticalattackdetector component
#unit_test opticalattackdetector:
# variables:
# IMAGE_NAME: 'opticalattackdetector' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: unit_test
# needs:
# - build opticalattackdetector
# before_script:
# - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
# - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
# - if docker container ls | grep redis; then docker rm -f redis; else echo "redis image is not in the system"; fi
# - if docker container ls | grep dbscanserving; then docker rm -f dbscanserving; else echo "dbscanserving image is not in the system"; fi
# script:
# - export REDIS_PASSWORD=$(uuidgen)
# - docker pull "redis:7.0-alpine"
# - docker run --name redis -d --network=teraflowbridge -p 16379:6379 -e REDIS_PASSWORD=${REDIS_PASSWORD} --rm redis:7.0-alpine redis-server --requirepass ${REDIS_PASSWORD}
# - while ! docker logs redis 2>&1 | grep -q 'Ready to accept connections'; do sleep 1; done
# - docker logs redis
# - REDIS_ADDRESS=$(docker inspect redis --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
# - docker pull "$CI_REGISTRY_IMAGE/dbscanserving:$IMAGE_TAG"
# - docker run --name dbscanserving -d -p 10008:10008 --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/dbscanserving:$IMAGE_TAG "python -m dbscanserving.service"
# - docker logs dbscanserving
# - DBSCANSERVING_ADDRESS=$(docker inspect dbscanserving --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
# - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
# - >
# docker run --name $IMAGE_NAME -d -p 10006:10006
# -v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results"
# -e REDIS_PASSWORD=${REDIS_PASSWORD}
# -e DBSCANSERVINGSERVICE_SERVICE_HOST=${DBSCANSERVING_ADDRESS}
# -e CACHINGSERVICE_SERVICE_HOST=${REDIS_ADDRESS}
# --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
# - sleep 5
# - docker ps -a
# - docker logs $IMAGE_NAME
# - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/${IMAGE_NAME}/results/${IMAGE_NAME}_report.xml"
# - docker logs redis
# - docker logs dbscanserving
# - docker logs $IMAGE_NAME
# - docker exec -i $IMAGE_NAME bash -c "coverage xml -o /home/${IMAGE_NAME}/results/${IMAGE_NAME}_coverage.xml"
# - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
# coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
# after_script:
# - docker rm -f $IMAGE_NAME
# - docker rm -f redis
# - docker rm -f dbscanserving
# - docker network rm teraflowbridge
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# - changes:
# - src/$IMAGE_NAME/**/*.{py,in,yml}
# - src/$IMAGE_NAME/Dockerfile
# - src/$IMAGE_NAME/tests/*.py
# - src/$IMAGE_NAME/tests/Dockerfile
# - manifests/${IMAGE_NAME}service.yaml
# - .gitlab-ci.yml
# artifacts:
# when: always
# reports:
# junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
# Deployment of the opticalattackdetector service in Kubernetes Cluster
......
......@@ -221,7 +221,7 @@ class OpticalAttackDetectorServiceServicerImpl(OpticalAttackDetectorServiceServi
monitoring_client.IncludeKpi(kpi)
# if -1 in response.cluster_indices: # attack detected
if kpi.kpi_value.int32Val == -1:
if kpi.kpi_value.int32Val == 1:
attack = AttackDescription()
attack.cs_id.uuid = request.service_id.service_uuid.uuid
with HISTOGRAM_DURATION.labels(step="mitigation", **METRIC_LABELS).time():
......
......@@ -15,13 +15,13 @@
import logging
import uuid
import queue
import time
from unittest.mock import patch
import pytest
from common.proto import dbscanserving_pb2 as dbscan
from common.proto.optical_attack_detector_pb2 import DetectionRequest
from common.tests.MockServicerImpl_Monitoring import MockServicerImpl_Monitoring
from opticalattackdetector.client.OpticalAttackDetectorClient import \
OpticalAttackDetectorClient
......@@ -37,6 +37,7 @@ LOGGER = logging.getLogger(__name__)
def optical_attack_detector_service():
_service = OpticalAttackDetectorService()
_service.start()
time.sleep(2)
yield _service
_service.stop()
......@@ -44,7 +45,7 @@ def optical_attack_detector_service():
@pytest.fixture(scope="session")
def optical_attack_detector_client(optical_attack_detector_service: OpticalAttackDetectorService):
_client = OpticalAttackDetectorClient(
host=optical_attack_detector_service.bind_address,
host="127.0.0.1",
port=optical_attack_detector_service.bind_port,
)
yield _client
......@@ -56,26 +57,20 @@ def test_detect_attack(
optical_attack_detector_client: OpticalAttackDetectorClient,
):
message = dbscan.DetectionResponse()
message.cluster_indices.extend([0, 1, -1, -1, -1])
message.cluster_indices.extend([0, 1] * 5 + [-1] * 10)
monitoring_mock = MockServicerImpl_Monitoring(queue_samples = queue.Queue())
with patch(
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.attack_mitigator_client"
) as mitigator, patch(
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.monitoring_client",
monitoring_mock,
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.monitoring_client.IncludeKpi",
) as monitoring, patch(
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.dbscanserving_client.Detect",
# TODO: return dumb object with "cluster_indices" attribute
# idea: create new response object
return_value=message,
) as dbscanserving:
for _ in range(10):
request: DetectionRequest = DetectionRequest()
request.service_id.context_id.context_uuid.uuid = str(uuid.uuid4())
request.service_id.service_uuid.uuid = str(uuid.uuid4())
request.kpi_id.kpi_id.uuid = "1"
optical_attack_detector_client.DetectAttack(request)
dbscanserving.assert_called_once()
monitoring.IncludeKpi.assert_called_once()
mitigator.NotifyAttack.assert_called()
dbscanserving.assert_called()
monitoring.assert_called()
......@@ -21,7 +21,7 @@ build opticalattackmanager:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......@@ -38,45 +38,45 @@ build opticalattackmanager:
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
# Apply unit test to the component
unit_test opticalattackmanager:
variables:
IMAGE_NAME: 'opticalattackmanager' # name of the microservice
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: unit_test
needs:
- build opticalattackmanager
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 10005:10005 -e LOG_LEVEL=DEBUG -v "$PWD/src/$IMAGE_NAME/tests:/home/teraflow/controller/$IMAGE_NAME/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- docker ps -a
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/teraflow/controller/$IMAGE_NAME/results/${IMAGE_NAME}_report.xml; coverage report --include='${IMAGE_NAME}/*' --show-missing"
- ls -la src/$IMAGE_NAME/tests
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
- docker network rm teraflowbridge
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- src/$IMAGE_NAME/tests/Dockerfile
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
## Apply unit test to the component
#unit_test opticalattackmanager:
# variables:
# IMAGE_NAME: 'opticalattackmanager' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: unit_test
# needs:
# - build opticalattackmanager
# before_script:
# - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
# - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
# script:
# - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
# - docker run --name $IMAGE_NAME -d -p 10005:10005 -e LOG_LEVEL=DEBUG -v "$PWD/src/$IMAGE_NAME/tests:/home/teraflow/controller/$IMAGE_NAME/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
# - docker ps -a
# - docker logs $IMAGE_NAME
# - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/teraflow/controller/$IMAGE_NAME/results/${IMAGE_NAME}_report.xml; coverage report --include='${IMAGE_NAME}/*' --show-missing"
# - ls -la src/$IMAGE_NAME/tests
# coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
# after_script:
# - docker rm -f $IMAGE_NAME
# - docker network rm teraflowbridge
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# - changes:
# - src/common/**/*.py
# - proto/*.proto
# - src/$IMAGE_NAME/**/*.{py,in,yml}
# - src/$IMAGE_NAME/Dockerfile
# - src/$IMAGE_NAME/tests/*.py
# - src/$IMAGE_NAME/tests/Dockerfile
# - manifests/${IMAGE_NAME}service.yaml
# - .gitlab-ci.yml
# artifacts:
# when: always
# reports:
# junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
# Deployment of the service in Kubernetes Cluster
# deploy opticalattackmanager:
......
......@@ -21,7 +21,7 @@ build opticalattackmitigator:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
after_script:
......@@ -36,41 +36,41 @@ build opticalattackmitigator:
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
# apply unit test to the opticalattackmitigator component
unit_test opticalattackmitigator:
variables:
IMAGE_NAME: 'opticalattackmitigator' # name of the microservice
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: unit_test
needs:
- build opticalattackmitigator
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 10007:10007 -v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results" --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- sleep 5
- docker ps -a
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/${IMAGE_NAME}/results/${IMAGE_NAME}_report.xml; coverage xml -o /home/${IMAGE_NAME}/results/${IMAGE_NAME}_coverage.xml; coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
- docker network rm teraflowbridge
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
## apply unit test to the opticalattackmitigator component
#unit_test opticalattackmitigator:
# variables:
# IMAGE_NAME: 'opticalattackmitigator' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: unit_test
# needs:
# - build opticalattackmitigator
# before_script:
# - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
# - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
# script:
# - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
# - docker run --name $IMAGE_NAME -d -p 10007:10007 -v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results" --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
# - sleep 5
# - docker ps -a
# - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/${IMAGE_NAME}/results/${IMAGE_NAME}_report.xml; coverage xml -o /home/${IMAGE_NAME}/results/${IMAGE_NAME}_coverage.xml; coverage report --include='${IMAGE_NAME}/*' --show-missing"
# coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
# after_script:
# - docker rm -f $IMAGE_NAME
# - docker network rm teraflowbridge
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# - changes:
# - src/$IMAGE_NAME/**/*.{py,in,yml}
# - src/$IMAGE_NAME/Dockerfile
# - src/$IMAGE_NAME/tests/*.py
# - manifests/${IMAGE_NAME}service.yaml
# - .gitlab-ci.yml
# artifacts:
# when: always
# reports:
# junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
# Deployment of the opticalattackmitigator service in Kubernetes Cluster
......
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build, tag, and push the Docker image to the GitLab Docker registry
build opticalcontroller:
  variables:
    IMAGE_NAME: 'opticalcontroller' # name of the microservice
    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
  stage: build
  before_script:
    # authenticate against the GitLab container registry of this project
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    # build from the repository root so the Dockerfile can COPY shared sources (src/common, proto)
    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
  after_script:
    # remove dangling images to keep the CI runner's disk usage bounded
    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
  rules:
    # run on MRs targeting develop or the default branch, on pushes to develop,
    # or when any file the image depends on changes
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
    - changes:
      - src/common/**/*.py
      - proto/*.proto
      - src/$IMAGE_NAME/**/*.{py,in,yml}
      - src/$IMAGE_NAME/Dockerfile
      - src/$IMAGE_NAME/tests/*.py
      - manifests/${IMAGE_NAME}service.yaml
      - .gitlab-ci.yml
## Apply unit test to the component
#unit_test opticalcontroller:
# variables:
# IMAGE_NAME: 'opticalcontroller' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: unit_test
# needs:
# - build opticalcontroller
# before_script:
# - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
# - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
# script:
# - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
# - docker run --name $IMAGE_NAME -d -p 20030:20030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
# - sleep 5
# - docker ps -a
# - docker logs $IMAGE_NAME
# - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
# - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
# coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
# after_script:
# - docker rm -f $IMAGE_NAME
# - docker network rm teraflowbridge
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# - changes:
# - src/common/**/*.py
# - proto/*.proto
# - src/$IMAGE_NAME/**/*.{py,in,yml}
# - src/$IMAGE_NAME/Dockerfile
# - src/$IMAGE_NAME/tests/*.py
# - manifests/${IMAGE_NAME}service.yaml
# - .gitlab-ci.yml
# artifacts:
# when: always
# reports:
# junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
## Deployment of the service in Kubernetes Cluster
#deploy opticalcontroller:
# variables:
# IMAGE_NAME: 'opticalcontroller' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: deploy
# needs:
#  - unit_test opticalcontroller
# # - integ_test execute
# script:
# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
# - kubectl version
# - kubectl get all
# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
# - kubectl get all
# # environment:
# # name: test
# # url: https://example.com
# # kubernetes:
# # namespace: test
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# when: manual
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# when: manual
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.9-slim

# Install build/runtime dependencies (g++ needed to compile native wheels, git/wget for fetching)
RUN apt-get --yes --quiet --quiet update && \
    apt-get --yes --quiet --quiet install wget g++ git && \
    rm -rf /var/lib/apt/lists/*

# Set Python to show logs as they occur.
# NOTE: Python enables unbuffered mode for ANY non-empty value of this variable;
# the previous value "0" worked only by accident — use "1" to state the intent clearly.
ENV PYTHONUNBUFFERED=1

# Download the gRPC health probe (used by Kubernetes liveness/readiness probes)
RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
    chmod +x /bin/grpc_health_probe

# Get generic Python packages
RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install --upgrade setuptools wheel
RUN python3 -m pip install --upgrade pip-tools

# Get common Python packages
# Note: this step enables sharing the previous Docker build steps among all the Python components
WORKDIR /var/teraflow
COPY common_requirements.in common_requirements.in
RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
RUN python3 -m pip install -r common_requirements.txt

# Add common files into working directory
WORKDIR /var/teraflow/common
COPY src/common/. ./
RUN rm -rf proto

# Create proto sub-folder, copy .proto files, and generate Python code
RUN mkdir -p /var/teraflow/common/proto
WORKDIR /var/teraflow/common/proto
RUN touch __init__.py
COPY proto/*.proto ./
RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
RUN rm *.proto
# Rewrite generated absolute imports into package-relative ones so the proto
# package is importable from /var/teraflow/common/proto
RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;

# Create component sub-folders, get specific Python packages
RUN mkdir -p /var/teraflow/opticalcontroller
WORKDIR /var/teraflow/opticalcontroller
COPY src/opticalcontroller/requirements.in requirements.in
RUN pip-compile --quiet --output-file=requirements.txt requirements.in
RUN python3 -m pip install -r requirements.txt

# Add component files into working directory
# (context client is copied because the optical controller talks to the Context component)
WORKDIR /var/teraflow
COPY src/context/__init__.py context/__init__.py
COPY src/context/client/. context/client/
COPY src/opticalcontroller/. opticalcontroller/

# Start the service
WORKDIR /var/teraflow/opticalcontroller
ENTRYPOINT ["python", "OpticalController.py"]
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from flask import render_template
from flask_restplus import Resource, Api
from tools import *
from variables import *
from RSA import RSA
import time
import logging
# Global RSA engine; stays None until __main__ loads the topology and builds it.
rsa = None
LOGGER = logging.getLogger(__name__)
app = Flask(__name__)
# Flask-RESTPlus API wrapper: provides Swagger docs and the namespace below.
api = Api(app, version='1.0', title='Optical controller API',
          description='Rest API to configure OC Optical devices in TFS')
# app.config.from_object('config')
# appbuilder = AppBuilder(app, indexview=MyIndexView)
# All optical endpoints are registered under the /OpticalTFS prefix.
optical = api.namespace('OpticalTFS', description='TFS Optical APIs')
@app.route('/index')
def index():
    # Serve the static landing page of the controller UI.
    return render_template('index.html')
#@optical.route('/AddLightpath/<string:src>/<string:dst>/<int:bitrate>/<int:bidir>')
@optical.route('/AddLightpath/<string:src>/<string:dst>/<int:bitrate>')
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class AddLightpath(Resource):
    @staticmethod
    def put(src, dst, bitrate, bidir=1):
        """Compute and register a fixed-grid lightpath from *src* to *dst*.

        Returns the stored flow record with HTTP 200, or a 404 tuple when
        the controller is not initialized or no path/spectrum is found.
        """
        LOGGER.info("INFO: New Lightpath request from {} to {} with rate {} ".format(src, dst, bitrate))
        t0 = time.time()*1000.0
        if rsa is not None:
            # Fix: the debug graph dump used to run BEFORE the None check,
            # dereferencing rsa.g and crashing when the engine was not ready.
            if debug:
                rsa.g.printGraph()
            flow_id = rsa.rsa_computation(src, dst, bitrate, bidir)
            if rsa.db_flows[flow_id]["op-mode"] == 0:
                return 'No path found', 404
            t1 = time.time()*1000.0
            elapsed = t1 - t0
            LOGGER.info("INFO: time elapsed = {} ms".format(elapsed))
            return rsa.db_flows[flow_id], 200
        else:
            return "Error", 404
#@optical.route('/AddFlexLightpath/<string:src>/<string:dst>/<int:bitrate>')
@optical.route('/AddFlexLightpath/<string:src>/<string:dst>/<int:bitrate>',
               defaults={"bidir": 1, "band": None})
@optical.route('/AddFlexLightpath/<string:src>/<string:dst>/<int:bitrate>/<int:bidir>',
               defaults={"band": None})
@optical.route('/AddFlexLightpath/<string:src>/<string:dst>/<int:bitrate>/<int:bidir>/<int:band>',)
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class AddFlexLightpath(Resource):
    @staticmethod
    def put(src, dst, bitrate, bidir=1, band=None):
        """Compute a flex-grid lightpath, reusing or creating an optical band.

        Returns the flow record (or the optical-band record when only the
        band was created) with HTTP 200; 404 when nothing could be served.
        """
        print("INFO: New FlexLightpath request from {} to {} with rate {} ".format(src, dst, bitrate))
        LOGGER.info("INFO: New FlexLightpath request from {} to {} with rate {} ".format(src, dst, bitrate))
        t0 = time.time()*1000.0
        if rsa is not None:
            # Fix: the debug graph dump used to run BEFORE the None check,
            # dereferencing rsa.g and crashing when the engine was not ready.
            if debug:
                rsa.g.printGraph()
            flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band)
            print (f"flow_id {flow_id} and optical_band_id {optical_band_id} ")
            if flow_id is not None:
                if rsa.db_flows[flow_id]["op-mode"] == 0:
                    return 'No path found', 404
                t1 = time.time() * 1000.0
                elapsed = t1 - t0
                print("INFO: time elapsed = {} ms".format(elapsed))
                return rsa.db_flows[flow_id], 200
            else:
                # Only an optical band was provisioned; report it instead.
                if len(rsa.optical_bands[optical_band_id]["flows"]) == 0:
                    return 'No path found', 404
                else:
                    t1 = time.time() * 1000.0
                    elapsed = t1 - t0
                    LOGGER.info("INFO: time elapsed = {} ms".format(elapsed))
                    return rsa.optical_bands[optical_band_id], 200
        else:
            return "Error", 404
@optical.route('/DelFlexLightpath/<int:flow_id>/<string:src>/<string:dst>/<int:bitrate>/<int:o_band_id>')
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class DelFlexLightpath(Resource):
    # Fix: this class was also named DelLightpath, clashing with the
    # fixed-grid delete resource below (the later definition shadowed this
    # one at module level and collided in the Swagger model registry).
    @staticmethod
    def delete(flow_id, src, dst, bitrate, o_band_id):
        """Delete a flex-grid lightpath and release its slots back to the
        parent optical band (and its reverse band for bidirectional flows)."""
        if flow_id in rsa.db_flows.keys():
            flow = rsa.db_flows[flow_id]
            bidir = flow["bidir"]
            # Endpoints must match the stored flow; bidirectional flows also
            # accept the reversed orientation.
            match1 = flow["src"] == src and flow["dst"] == dst and flow["bitrate"] == bitrate
            if bidir:
                match2 = flow["src"] == dst and flow["dst"] == src and flow["bitrate"] == bitrate
                if match1 or match2:
                    ob_id = flow["parent_opt_band"]
                    rsa.del_flow(flow, ob_id)
                    rsa.db_flows[flow_id]["is_active"] = False
                    rsa.optical_bands[ob_id]["served_lightpaths"].remove(flow_id)
                    if rsa.optical_bands[ob_id]["reverse_optical_band_id"] != 0:
                        rev_ob_id = rsa.optical_bands[ob_id]["reverse_optical_band_id"]
                        rsa.optical_bands[rev_ob_id]["served_lightpaths"].remove(flow_id)
                    if debug:
                        LOGGER.info(links_dict)
                    return "flow {} deleted".format(flow_id), 200
                else:
                    return "flow {} not matching".format(flow_id), 404
            else:
                if match1:
                    ob_id = flow["parent_opt_band"]
                    rsa.del_flow(flow, ob_id)
                    rsa.db_flows[flow_id]["is_active"] = False
                    rsa.optical_bands[ob_id]["served_lightpaths"].remove(flow_id)
                    if debug:
                        LOGGER.info(links_dict)
                    return "flow {} deleted".format(flow_id), 200
                else:
                    return "flow {} not matching".format(flow_id), 404
        else:
            return "flow id {} does not exist".format(flow_id), 404
@optical.route('/DelLightpath/<int:flow_id>/<string:src>/<string:dst>/<int:bitrate>')
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class DelLightpath(Resource):
    @staticmethod
    def delete(flow_id, src, dst, bitrate):
        """Delete a fixed-grid lightpath; endpoints may match in either direction."""
        if flow_id not in rsa.db_flows.keys():
            return "flow id {} does not exist".format(flow_id), 404
        flow = rsa.db_flows[flow_id]
        forward = flow["src"] == src and flow["dst"] == dst and flow["bitrate"] == bitrate
        backward = flow["src"] == dst and flow["dst"] == src and flow["bitrate"] == bitrate
        if not (forward or backward):
            return "flow {} not matching".format(flow_id), 404
        rsa.del_flow(flow)
        rsa.db_flows[flow_id]["is_active"] = False
        if debug:
            LOGGER.info(links_dict)
        return "flow {} deleted".format(flow_id), 200
@optical.route('/GetLightpaths')
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class GetFlows(Resource):
    @staticmethod
    def get():
        """Return the full lightpath (flow) database."""
        try:
            if debug:
                LOGGER.info(rsa.db_flows)
            return rsa.db_flows, 200
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; narrow to Exception.
            return "Error", 404
@optical.route('/GetOpticalBands')
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class GetBands(Resource):
    @staticmethod
    def get():
        """Return the full optical-band database."""
        print("Getting ")
        LOGGER.info("Getting")
        try:
            if debug:
                LOGGER.info(rsa.optical_bands)
            return rsa.optical_bands, 200
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; narrow to Exception.
            return "Error", 404
@optical.route('/GetOpticalBand/<int:ob_id>')
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class GetBand(Resource):
    @staticmethod
    def get(ob_id):
        """Return one optical band by id (keys compared as strings), or {} + 404."""
        for ob_idx in rsa.optical_bands.keys():
            if str(ob_idx) == str(ob_id):
                if debug:
                    # Fix: logged rsa.optical_bands[ob_id] (the raw int route
                    # arg) instead of the matched key ob_idx — KeyError when
                    # the dict keys are strings.
                    LOGGER.info(rsa.optical_bands[ob_idx])
                return rsa.optical_bands[ob_idx], 200
        return {}, 404
@optical.route('/GetLinks')
@optical.response(200, 'Success')
@optical.response(404, 'Error, not found')
class GetLinks(Resource):
    # Fix: this class was also named GetFlows, clashing with the
    # /GetLightpaths resource above (module-level shadowing and Swagger
    # model collision); renamed to GetLinks.
    @staticmethod
    def get():
        """Return the raw topology links dictionary."""
        global links_dict
        try:
            if debug:
                LOGGER.info(links_dict)
            return links_dict, 200
        except Exception:
            # Fix: was a bare `except:`; narrow to Exception.
            return "Error", 404
if __name__ == '__main__':
    # Start metrics server
    LOGGER.info('Starting...')
    # Load nodes/links from the JSON files declared in variables.py.
    nodes_dict, links_dict = readTopologyData(nodes_json, topology_json)
    #topologies, links = getTopology()
    #print("topologies{} and devices {}".format(topologies,links))
    # Build the global RSA engine used by every REST resource above.
    rsa = RSA(nodes_dict, links_dict)
    # NOTE(review): debug=True enables the Werkzeug reloader/debugger —
    # not suitable for production deployments; confirm intent.
    app.run(host='0.0.0.0', port=10060, debug=True)
# optical-controller
This is a framework that implements the optical controller for the RMSA algorithm.
#create a venv
python -m venv venv
on Linux:
source venv/bin/activate
on Windows:
venv\Scripts\activate
pip install -r requirements_opt.txt
python OpticalController.py
![Reference Architecture](images/topo.png)
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dijsktra
from tools import *
from variables import *
class RSA():
    def __init__(self, nodes, links):
        """Build the RSA engine over a node dictionary and a links dictionary.

        nodes: dict of node-name -> node description (with a "type" field).
        links: dict with a "links" list of optical-link descriptions.
        """
        self.nodes_dict = nodes
        self.links_dict = links
        self.g = None                 # dijsktra.Graph, built by initGraph()
        self.flow_id = 0              # last assigned lightpath id
        self.opt_band_id = 0          # last assigned optical-band id
        self.db_flows = {}            # flow_id -> flow record
        self.initGraph()
        # Per-band slot counts; populated by init_link_slots().
        self.c_slot_number = 0
        self.l_slot_number = 0
        self.s_slot_number = 0
        self.optical_bands = {}       # optical_band_id -> band record
    def init_link_slots(self, testing):
        """Initialize fiber slot lists and record the per-band slot counts.

        When *testing* is False every non-empty slot list is reset to the
        full range (Nc/Nl/Ns come from variables.py). The counts are then
        taken from the first fiber of the first link.
        Returns the counts formatted as "c,l,s".
        """
        if not testing:
            for l in self.links_dict["links"]:
                for fib in l["optical_link"]["details"]["fibers"]:
                    #fib = self.links_dict[l]["fibers"][f]
                    # Only refill bands the fiber actually supports
                    # (an empty list means the band is absent).
                    if len(fib["c_slots"]) > 0:
                        fib["c_slots"] = list(range(0, Nc))
                    if len(fib["l_slots"]) > 0:
                        fib["l_slots"] = list(range(0, Nl))
                    if len(fib["s_slots"]) > 0:
                        fib["s_slots"] = list(range(0, Ns))
                    if debug:
                        print(fib)
        # Sample the first fiber of the first link for the global counts;
        # assumes all fibers expose the same band widths — TODO confirm.
        for l1 in self.links_dict["links"]:
            for fib1 in l1["optical_link"]["details"]["fibers"]:
                #fib1 = self.links_dict[l1]["details"]["fibers"][f1]
                self.c_slot_number = len(fib1["c_slots"])
                self.l_slot_number = len(fib1["l_slots"])
                self.s_slot_number = len(fib1["s_slots"])
                break
            break
        return "{},{},{}".format(self.c_slot_number, self.l_slot_number, self.s_slot_number)
    def initGraph(self):
        """Build the dijsktra Graph: one vertex per node, one unit-weight
        directed edge per link (link names are "SRC-DST")."""
        self.g = dijsktra.Graph()
        for n in self.nodes_dict:
            self.g.add_vertex(n)
        for l in self.links_dict["links"]:
            if debug:
                print(l)
            # Endpoints are encoded in the link name; ports in the details.
            [s, d] = l["optical_link"]["name"].split('-')
            ps = l["optical_link"]["details"]["source"]
            pd = l["optical_link"]["details"]["target"]
            self.g.add_edge(s, d, ps, pd, 1)
        print("INFO: Graph initiated.")
        if debug:
            self.g.printGraph()
    def compute_path(self, src, dst):
        """Run shortest-path from *src* to *dst* on the topology graph.

        Returns (links, path): the ordered list of "SRC-DST" link names and
        the ordered list of node names. The graph is reset afterwards so the
        next computation starts clean.
        """
        path = dijsktra.shortest_path(self.g, self.g.get_vertex(src), self.g.get_vertex(dst))
        print("INFO: Path from {} to {} with distance: {}".format(src, dst, self.g.get_vertex(dst).get_distance()))
        if debug:
            print(path)
        links = []
        # Convert the node sequence into the corresponding link names.
        for i in range(0, len(path) - 1):
            s = path[i]
            if debug:
                print(s)
            if i < len(path) - 1:
                d = path[i + 1]
                link_id = "{}-{}".format(s, d)
                if debug:
                    #print(link_id, self.links_dict[link_id])
                    print(link_id, self.get_link_by_name(link_id))
                links.append(link_id)
        self.g.reset_graph()
        return links, path
    def get_slots(self, links, slots, optical_band_id = None):
        """Find the spectrum slots usable on every link of the path.

        links: ordered list of "SRC-DST" link names.
        slots: int -> number of contiguous slots required per band;
            non-int -> the full per-band widths are requested.
        optical_band_id: when given, results are further intersected with
            the free slots of that existing optical band.
        Returns three lists (c, l, s) of candidate slots; all three empty
        means some link had no usable fiber at all.
        """
        if isinstance(slots, int):
            val_c = slots
            val_s = slots
            val_l = slots
        else:
            val_c = self.c_slot_number
            val_l = self.l_slot_number
            val_s = self.s_slot_number
        c_sts = []
        l_sts = []
        s_sts = []
        c_slots = {}
        l_slots = {}
        s_slots = {}
        add = ""
        drop = ""
        # The first/last hop touching an OC-TP node is the add/drop link;
        # fibers already "used" there cannot be shared.
        src_1, dst_1 = links[0].split('-')
        src_2, dst_2 = links[-1].split('-')
        if self.nodes_dict[src_1]["type"] == "OC-TP":
            add = links[0]
        if self.nodes_dict[dst_2]["type"] == "OC-TP":
            drop = links[-1]
        for l in links:
            c_slots[l] = []
            l_slots[l] = []
            s_slots[l] = []
            found = 0
            for link in self.links_dict["links"]:
                if link["optical_link"]["name"] == l:
                    #for f in self.links_dict[l]['fibers'].keys():
                    for fib in link["optical_link"]["details"]["fibers"]:
                        # Skip busy fibers on the add/drop links.
                        if l == add:
                            if 'used' in fib:
                                if fib["used"]:
                                    #if debug:
                                    print("WARNING!!!: link {}, fiber {} is already in use".format(l, fib["ID"]))
                                    continue
                        if l == drop:
                            if 'used' in fib:
                                if fib["used"]:
                                    #if debug:
                                    print("WARNING!!!: link {}, fiber {} is already in use".format(l, fib["ID"]))
                                    continue
                        # Union over fibers of the runs of consecutive free
                        # slots long enough for the request (tools.py helpers).
                        if len(fib["c_slots"]) > 0:
                            c_slots[l] = combine(c_slots[l], consecutives(fib["c_slots"], val_c))
                        if len(fib["l_slots"]) > 0:
                            l_slots[l] = combine(l_slots[l], consecutives(fib["l_slots"], val_l))
                        if len(fib["s_slots"]) > 0:
                            s_slots[l] = combine(s_slots[l], consecutives(fib["s_slots"], val_s))
                        if debug:
                            print(l, c_slots[l])
                    found = 1
            if found == 0:
                return [], [], []
        keys = list(c_slots.keys())
        if debug:
            print(len(keys))
        if debug:
            print(keys[0])
        # intersection among the slots over all links
        if len(keys) == 1:
            c_sts = c_slots[keys[0]]
            l_sts = l_slots[keys[0]]
            s_sts = s_slots[keys[0]]
        else:
            for i in range(1, len(keys)):
                if debug:
                    print(keys[i])
                # set a for the intersection
                if i == 1:
                    a_c = c_slots[keys[i - 1]]
                    a_l = l_slots[keys[i - 1]]
                    a_s = s_slots[keys[i - 1]]
                else:
                    a_c = c_sts
                    a_l = l_sts
                    a_s = s_sts
                # set b for the intersection
                b_c = c_slots[keys[i]]
                b_l = l_slots[keys[i]]
                b_s = s_slots[keys[i]]
                c_sts = common_slots(a_c, b_c)
                l_sts = common_slots(a_l, b_l)
                s_sts = common_slots(a_s, b_s)
        # Finally constrain the result to the slots the optical band still
        # owns; a band lacking a slot list contributes nothing for that band.
        if optical_band_id is not None:
            if "c_slots" in self.optical_bands[optical_band_id].keys():
                if len(self.optical_bands[optical_band_id]["c_slots"]) > 0:
                    a_c = c_sts
                    b_c = self.optical_bands[optical_band_id]["c_slots"]
                    c_sts = common_slots(a_c, b_c)
                else:
                    c_sts = []
            else:
                c_sts = []
            if "l_slots" in self.optical_bands[optical_band_id].keys():
                if len(self.optical_bands[optical_band_id]["l_slots"]) > 0:
                    a_l = l_sts
                    b_l = self.optical_bands[optical_band_id]["l_slots"]
                    l_sts = common_slots(a_l, b_l)
                else:
                    l_sts = []
            else:
                l_sts = []
            if "s_slots" in self.optical_bands[optical_band_id].keys():
                if len(self.optical_bands[optical_band_id]["s_slots"]) > 0:
                    a_s = s_sts
                    b_s = self.optical_bands[optical_band_id]["s_slots"]
                    s_sts = common_slots(a_s, b_s)
                else:
                    s_sts = []
            else:
                s_sts = []
        return c_sts, l_sts, s_sts
def update_link(self, fib, slots, band):
for i in slots:
fib[band].remove(i)
if 'used' in fib:
fib['used'] = True
def update_optical_band(self, optical_band_id, slots, band):
for i in slots:
self.optical_bands[optical_band_id][band].remove(i)
def restore_link(self, fib, slots, band):
for i in slots:
fib[band].append(int(i))
if 'used' in fib:
fib['used'] = False
fib[band].sort()
def restore_optical_band(self, optical_band_id, slots, band):
for i in slots:
self.optical_bands[optical_band_id][band].append(int(i))
self.optical_bands[optical_band_id][band].sort()
    def del_flow(self, flow, o_b_id = None):
        """Release the spectrum held by *flow* on every fiber it crossed.

        flow: a record from self.db_flows.
        o_b_id: parent optical-band id; when given, its slots are restored
            too.
        Always returns True. The caller is responsible for marking the flow
        inactive and updating served_lightpaths.
        """
        # Several fields are unpacked but not all are used below; kept to
        # document the record layout.
        flows = flow["flows"]
        band = flow["band_type"]
        slots = flow["slots"]
        fiber_f = flow["fiber_forward"]
        fiber_b = flow["fiber_backward"]
        op = flow["op-mode"]
        n_slots = flow["n_slots"]
        path = flow["path"]
        links = flow["links"]
        bidir = flow["bidir"]
        for l in fiber_f.keys():
            if debug:
                print(l)
                print(fiber_f[l])
            #link = self.links_dict[l]
            #f = fiber_f[l]
            #fib = link['fibers'][f]
            fib = self.get_fiber_details(l, fiber_f[l])
            # Only restore when the slots are not already free on the fiber
            # (guards against double release).
            if not list_in_list(slots, fib[band]):
                self.restore_link(fib, slots, band)
                if debug:
                    print(fib[band])
        if o_b_id is not None:
            self.restore_optical_band(o_b_id, slots, band)
        if bidir:
            for rl in fiber_b.keys():
                if debug:
                    print(rl)
                    print(fiber_b[rl])
                #rlink = self.links_dict[rl]
                #rf = fiber_b[rl]
                #rfib = rlink['fibers'][rf]
                rfib = self.get_fiber_details(rl, fiber_b[rl])
                if not list_in_list(slots, rfib[band]):
                    self.restore_link(rfib, slots, band)
                    if debug:
                        print(rfib[band])
            #changed according to TFS development
            #if o_b_id is not None:
            #    rev_o_band_id = self.optical_bands[o_b_id]["reverse_optical_band_id"]
            #    self.restore_optical_band(rev_o_band_id, slots, band)
        return True
    def get_fibers_forward(self, links, slots, band):
        """Pick, per link, the first fiber that can host *slots* in *band*,
        and immediately allocate the slots on it.

        Returns {link_name: fiber_ID}; a link with no suitable fiber is
        simply absent from the result (callers assume success — TODO confirm).
        """
        fiber_list = {}
        # First/last hop are the add/drop links; busy fibers are skipped there.
        add = links[0]
        drop = links[-1]
        print(links)
        '''
        for link in self.links_dict["links"]:
            if link["optical_link"]["name"] == l:
                # for f in self.links_dict[l]['fibers'].keys():
                for fib in link["optical_link"]["details"]["fibers"]:
        '''
        for l in links:
            for link in self.links_dict["links"]:
                if link["optical_link"]["name"] == l:
                    for fib in link["optical_link"]["details"]["fibers"]:
                        #for f in self.links_dict[l]['fibers'].keys():
                        #for fib in l["optical_link"]["details"]["fibers"]:
                        #fib = self.links_dict[l]['fibers'][f]
                        if l == add:
                            if 'used' in fib:
                                if fib["used"]:
                                    if debug:
                                        print("link {}, fiber {} is already in use".format(l, fib["ID"]))
                                    continue
                        if l == drop:
                            if 'used' in fib:
                                if fib["used"]:
                                    if debug:
                                        print("link {}, fiber {} is already in use".format(l, fib["ID"]))
                                    continue
                        # First fiber with all requested slots free wins.
                        if list_in_list(slots, fib[band]):
                            fiber_list[l] = fib["ID"]
                            self.update_link(fib, slots, band)
                            break
        print("INFO: Path forward computation completed")
        return fiber_list
def get_link_by_name (self, key):
result = None
for link in self.links_dict["links"]:
if link["optical_link"]["name"] == key:
if debug:
print(link)
result = link
break
return result
def get_fiber_details(self, link_key, fiber_id):
for link in self.links_dict["links"]:
if link["optical_link"]["name"] == link_key:
if debug:
print(link)
for fib in link["optical_link"]["details"]["fibers"]:
if fib["ID"] == fiber_id:
return fib
return None
    def get_fibers_backward(self, links, fibers, slots, band):
        """Select and allocate the reverse-direction fibers matching the
        forward selection in *fibers*.

        For each forward fiber, the reverse link ("DST-SRC") is searched for
        the fiber whose remote_peer_port equals the forward src_port; its
        slots are allocated when free.
        Returns {reverse_link_name: fiber_ID}.
        """
        fiber_list = {}
        #r_drop = reverse_link(links[0])
        #r_add = reverse_link(links[-1])
        for l in fibers.keys():
            fib = self.get_fiber_details(l, fibers[l])
            '''
            link = self.get_link_by_name(l)
            #port = self.links_dict[l]["fibers"][fibers[l]]["src_port"]
            for fib in link["optical_link"]["details"]["fibers"]:
                if fib["ID"] == fibers[l]:
            '''
            port = fib["src_port"]
            # reverse_link comes from tools.py and flips "SRC-DST".
            r_l = reverse_link(l)
            r_link = self.get_link_by_name(r_l)
            #for f in r_link["fibers"].keys():
            for r_fib in r_link["optical_link"]["details"]["fibers"]:
                # Pair fibers through the peer-port relationship.
                if r_fib["remote_peer_port"] == port:
                    if list_in_list(slots, r_fib[band]):
                        fiber_list[r_l] = r_fib["ID"]
                        self.update_link(r_fib, slots, band)
        print("INFO: Path backward computation completed")
        return fiber_list
    def select_slots_and_ports(self, links, n_slots, c, l, s, bidir):
        """Choose a band and slot block, allocate fibers, and build the
        per-node flow (in-port/out-port) matrix for a lightpath.

        Returns (t_flows, band, slots, fibers_f, fibers_b), or
        (None, None, None) when no band can host *n_slots*.
        """
        if debug:
            print(self.links_dict)
        # slot_selection (tools.py) picks one band and a concrete slot block.
        band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number)
        if band is None:
            print("No slots available in the three bands")
            return None, None, None
        if debug:
            print(band, slots)
        fibers_f = self.get_fibers_forward(links, slots, band)
        fibers_b = []
        if bidir:
            fibers_b = self.get_fibers_backward(links, fibers_f, slots, band)
        if debug:
            print("forward")
            print(fibers_f)
            print("backward")
            print(fibers_b)
        add = links[0]
        drop = links[-1]
        # Port "0" denotes the client (add/drop) side.
        inport = "0"
        outport = "0"
        r_inport = "0"
        r_outport = "0"
        t_flows = {}
        #if len(links) == 1:
        # Walk the forward fibers link by link; each node first appears as
        # the dst of one hop, then is overwritten as the src of the next,
        # so intermediate entries end up with both ports filled.
        for lx in fibers_f:
            if lx == add:
                inport = "0"
                r_outport = "0"
            if lx == drop:
                outport = "0"
                r_inport = "0"
            f = fibers_f[lx]
            src, dst = lx.split("-")
            fibx = self.get_fiber_details(lx, f)
            #outport = self.links_dict[lx]['fibers'][f]["src_port"]
            outport = fibx["src_port"]
            t_flows[src] = {}
            t_flows[src]["f"] = {}
            t_flows[src]["b"] = {}
            t_flows[src]["f"] = {"in": inport, "out": outport}
            if bidir:
                #r_inport = self.links_dict[lx]['fibers'][f]["local_peer_port"]
                r_inport = fibx["local_peer_port"]
                t_flows[src]["b"] = {"in": r_inport, "out": r_outport}
            # Carry this hop's far-end port into the next iteration.
            #inport = self.links_dict[lx]['fibers'][f]["dst_port"]
            inport = fibx["dst_port"]
            if bidir:
                #r_outport = self.links_dict[lx]['fibers'][f]["remote_peer_port"]
                r_outport = fibx["remote_peer_port"]
            t_flows[dst] = {}
            t_flows[dst]["f"] = {}
            t_flows[dst]["b"] = {}
            t_flows[dst]["f"] = {"in": inport, "out": "0"}
            if bidir:
                t_flows[dst]["b"] = {"in": "0", "out": r_outport}
        if debug:
            print(self.links_dict)
        if debug:
            print(t_flows)
        print("INFO: Flow matrix computed")
        return t_flows, band, slots, fibers_f, fibers_b
    def select_slots_and_ports_fs(self, links, n_slots, c, l, s, bidir, o_band_id):
        """Flex-grid variant of select_slots_and_ports: allocate slots inside
        an existing optical band *o_band_id* and wire the add/drop side flows
        through the band's recorded ports.

        Returns (t_flows, band, slots, fibers_f, fibers_b), or five Nones
        when no band can host *n_slots*.
        """
        if debug:
            print(self.links_dict)
        band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number)
        if band is None:
            print("No slots available in the three bands")
            return None, None, None, None, None
        if debug:
            print(band, slots)
        fibers_f = self.get_fibers_forward(links, slots, band)
        # The chosen slots are consumed from the optical band's budget too.
        self.update_optical_band(o_band_id, slots, band)
        fibers_b = []
        if bidir:
            fibers_b = self.get_fibers_backward(links, fibers_f, slots, band)
            '''
            rev_o_band_id = self.optical_bands[o_band_id]["reverse_optical_band_id"]
            self.update_optical_band(rev_o_band_id, slots, band)
            '''
        if debug:
            print("forward")
            print(fibers_f)
            if bidir:
                print("backward")
                print(fibers_b)
        add = links[0]
        drop = links[-1]
        port_0 = "0"
        t_flows = {}
        #flows_add_side
        f = fibers_f[add]
        src, dst = add.split("-")
        fibx = self.get_fiber_details(add, f)
        #outport = self.links_dict[add]['fibers'][f]["src_port"]
        outport = fibx["src_port"]
        #T1 rules
        t_flows[src] = {}
        t_flows[src]["f"] = {}
        t_flows[src]["b"] = {}
        t_flows[src]["f"] = {"in": port_0, "out": outport}
        if bidir:
            #r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"]
            r_inport = fibx["local_peer_port"]
            t_flows[src]["b"] = {"in": r_inport, "out": port_0}
        #R1 rules
        t_flows[dst] = {}
        t_flows[dst]["f"] = {}
        t_flows[dst]["b"] = {}
        #inport = self.links_dict[add]['fibers'][f]["dst_port"]
        inport = fibx["dst_port"]
        # Hand off into the optical band at its recorded source port.
        opt_band_src_port = self.optical_bands[o_band_id]["src_port"]
        t_flows[dst]["f"] = {"in": inport, "out": opt_band_src_port}
        #to modify to peer ports
        if bidir:
            #r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"]
            r_inport = fibx["local_peer_port"]
            t_flows[src]["b"] = {"in": r_inport, "out": port_0}
        if bidir:
            rev_opt_band_dst_port = self.optical_bands[o_band_id]["rev_dst_port"]
            #r_outport = self.links_dict[add]['fibers'][f]["remote_peer_port"]
            r_outport = fibx["remote_peer_port"]
            t_flows[dst]["b"] = {"in": rev_opt_band_dst_port, "out": r_outport}
        #flows_drop_side
        # R2 rules
        f = fibers_f[drop]
        src, dst = drop.split("-")
        fiby = self.get_fiber_details(drop, f)
        #outport = self.links_dict[drop]['fibers'][f]["src_port"]
        outport = fiby["src_port"]
        t_flows[src] = {}
        t_flows[src]["f"] = {}
        t_flows[src]["b"] = {}
        # Leave the optical band at its recorded destination port.
        opt_band_dst_port = self.optical_bands[o_band_id]["dst_port"]
        t_flows[src]["f"] = {"in": opt_band_dst_port, "out": outport}
        if bidir:
            rev_opt_band_src_port = self.optical_bands[o_band_id]["rev_src_port"]
            #r_inport = self.links_dict[drop]['fibers'][f]["local_peer_port"]
            r_inport = fiby["local_peer_port"]
            t_flows[src]["b"] = {"in": r_inport, "out": rev_opt_band_src_port}
        t_flows[dst] = {}
        t_flows[dst]["f"] = {}
        t_flows[dst]["b"] = {}
        #inport = self.links_dict[drop]['fibers'][f]["dst_port"]
        inport = fiby["dst_port"]
        t_flows[dst]["f"] = {"in": inport, "out": port_0}
        if bidir:
            #r_inport = self.links_dict[drop]['fibers'][f]["remote_peer_port"]
            r_inport = fiby["remote_peer_port"]
            t_flows[dst]["b"] = {"in": port_0, "out": r_inport}
        if debug:
            print(self.links_dict)
        if debug:
            print(t_flows)
        print("INFO: Flow matrix computed for Flex Lightpath")
        return t_flows, band, slots, fibers_f, fibers_b
    def rsa_computation(self, src, dst, rate, bidir):
        """End-to-end RSA for a fixed-grid lightpath: allocate an id, route,
        pick slots/fibers, and persist the flow record in self.db_flows.

        Returns the new flow id. On routing failure the record is nulled
        (op-mode 0) so callers can detect it.
        NOTE(review): when a path exists but no band has capacity, the record
        is left partially filled (no "op-mode" key) — confirm callers handle
        this.
        """
        self.flow_id += 1
        self.db_flows[self.flow_id] = {}
        self.db_flows[self.flow_id]["flow_id"] = self.flow_id
        self.db_flows[self.flow_id]["src"] = src
        self.db_flows[self.flow_id]["dst"] = dst
        self.db_flows[self.flow_id]["bitrate"] = rate
        self.db_flows[self.flow_id]["bidir"] = bidir
        links, path = self.compute_path(src, dst)
        if len(path) < 1:
            self.null_values(self.flow_id)
            return self.flow_id
        # map_rate_to_slot (tools.py) maps bitrate -> (op-mode, slot count).
        op, num_slots = map_rate_to_slot(rate)
        c_slots, l_slots, s_slots = self.get_slots(links, num_slots)
        if debug:
            print(c_slots)
            print(l_slots)
            print(s_slots)
        if len(c_slots) > 0 or len(l_slots) > 0 or len(s_slots) > 0:
            flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots,
                                                                                         l_slots, s_slots, bidir)
            f0, band = freqency_converter(band_range, slots)
            if debug:
                print(f0, band)
            print("INFO: RSA completed for normal wavelenght connection")
            if flow_list is None:
                self.null_values(self.flow_id)
                return self.flow_id
            # Normalize slot indices to plain ints for the stored record.
            slots_i = []
            for i in slots:
                slots_i.append(int(i))
            # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band
            # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band
            self.db_flows[self.flow_id]["flows"] = flow_list
            self.db_flows[self.flow_id]["band_type"] = band_range
            self.db_flows[self.flow_id]["slots"] = slots_i
            self.db_flows[self.flow_id]["fiber_forward"] = fiber_f
            self.db_flows[self.flow_id]["fiber_backward"] = fiber_b
            self.db_flows[self.flow_id]["op-mode"] = op
            self.db_flows[self.flow_id]["n_slots"] = num_slots
            self.db_flows[self.flow_id]["links"] = links
            self.db_flows[self.flow_id]["path"] = path
            self.db_flows[self.flow_id]["band"] = band
            self.db_flows[self.flow_id]["freq"] = f0
            self.db_flows[self.flow_id]["is_active"] = True
        return self.flow_id
def null_values(self, flow_id):
self.db_flows[flow_id]["flows"] = {}
self.db_flows[flow_id]["band_type"] = ""
self.db_flows[flow_id]["slots"] = []
self.db_flows[flow_id]["fiber_forward"] = []
self.db_flows[flow_id]["fiber_backward"] = []
self.db_flows[flow_id]["op-mode"] = 0
self.db_flows[flow_id]["n_slots"] = 0
self.db_flows[flow_id]["links"] = {}
self.db_flows[flow_id]["path"] = []
self.db_flows[flow_id]["band"] = 0
self.db_flows[flow_id]["freq"] = 0
self.db_flows[flow_id]["is_active"] = False
    def null_values_ob(self, ob_id):
        """Reset the optical-band entry *ob_id* to an inactive placeholder.

        NOTE(review): this also clears the parent/new optical-band fields of
        the CURRENT flow (self.db_flows[self.flow_id]) — side effect relied
        on by rsa_fs_computation; confirm before reuse elsewhere.
        """
        self.optical_bands[ob_id]["flows"] = {}
        self.optical_bands[ob_id]["band_type"] = ""
        #self.optical_bands[ob_id]["slots"] = []
        self.optical_bands[ob_id]["fiber_forward"] = []
        self.optical_bands[ob_id]["n_slots"] = 0
        self.optical_bands[ob_id]["links"] = {}
        self.optical_bands[ob_id]["path"] = []
        self.optical_bands[ob_id]["band"] = 0
        self.optical_bands[ob_id]["freq"] = 0
        self.optical_bands[ob_id]["is_active"] = False
        self.optical_bands[ob_id]["c_slots"] = []
        self.optical_bands[ob_id]["l_slots"] = []
        self.optical_bands[ob_id]["s_slots"] = []
        self.optical_bands[ob_id]["served_lightpaths"] = []
        self.optical_bands[ob_id]["reverse_optical_band_id"] = 0
        self.db_flows[self.flow_id]["parent_opt_band"] = 0
        self.db_flows[self.flow_id]["new_optical_band"] = 0
    def create_optical_band(self, links, path, bidir, num_slots):
        """Create a new optical band over the ROADM-to-ROADM portion of
        *links*/*path* and allocate *num_slots* for it.

        Mutates *links* and *path* in place: add/drop hops touching OC-TP
        nodes are stripped off and returned as *temp_links*.
        Returns (optical_band_id, temp_links).
        """
        print("INFO: Creating optical-band of {} slots".format(num_slots))
        self.opt_band_id += 1
        forw_opt_band_id = self.opt_band_id
        self.optical_bands[forw_opt_band_id] = {}
        self.optical_bands[forw_opt_band_id]["optical_band_id"] = forw_opt_band_id
        self.optical_bands[forw_opt_band_id]["bidir"] = bidir
        '''
        back_opt_band_id = 0
        if bidir:
            self.opt_band_id += 1
            back_opt_band_id = self.opt_band_id
            self.optical_bands[back_opt_band_id] = {}
            self.optical_bands[back_opt_band_id]["optical_band_id"] = back_opt_band_id
            self.optical_bands[back_opt_band_id]["bidir"] = bidir
            self.optical_bands[back_opt_band_id]["reverse_optical_band_id"] = forw_opt_band_id
            self.optical_bands[forw_opt_band_id]["reverse_optical_band_id"] = back_opt_band_id
        else:
            self.optical_bands[forw_opt_band_id]["reverse_optical_band_id"] = 0
        '''
        op = 0
        temp_links = []
        #num_slots = "all"
        # Strip the add hop when the path starts at a transponder (OC-TP);
        # the band itself spans only ROADM nodes.
        if self.nodes_dict[path[0]]["type"] == "OC-TP":
            add_link = links[0]
            temp_links.append(add_link)
            links.remove(add_link)
            path.remove(path[0])
            self.optical_bands[forw_opt_band_id]["src"] = path[0]
            '''
            if bidir:
                self.optical_bands[back_opt_band_id]["dst"] = path[0]
            '''
        # Same for the drop hop at the far end.
        if self.nodes_dict[path[-1]]["type"] == "OC-TP":
            drop_link = links[-1]
            temp_links.append(drop_link)
            links.remove(drop_link)
            path.remove(path[-1])
            self.optical_bands[forw_opt_band_id]["dst"] = path[-1]
            '''
            if bidir:
                self.optical_bands[back_opt_band_id]["src"] = path[-1]
            '''
        c_slots, l_slots, s_slots = self.get_slots(links, num_slots)
        if debug:
            print(c_slots)
            print(l_slots)
            print(s_slots)
        if len(c_slots) > 0 or len(l_slots) > 0 or len(s_slots) > 0:
            flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots, l_slots, s_slots, bidir)
            f0, band = freqency_converter(band_range, slots)
            print(flow_list, band_range, slots, fiber_f, fiber_b)
            '''
            flow_list_b = {}
            rev_path = path.copy()
            rev_path.reverse()
            rev_links = reverse_links(links)
            if bidir:
                for dev_x in flow_list.keys():
                    flow_list_b[dev_x] = {}
                    flow_list_b[dev_x]["f"] = flow_list[dev_x]["b"]
                    del flow_list[dev_x]["b"]
                rev_path = path.copy()
            '''
            if debug:
                print(f0, band)
            print("INFO: RSA completed for optical band")
            if flow_list is None:
                self.null_values(self.flow_id)
                return self.flow_id, []
            slots_i = []
            for i in slots:
                slots_i.append(int(i))
            # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band
            # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band
            if len(flow_list) > 0:
                # Band endpoints: out-port at the head node, in-port at the tail.
                src_port = flow_list[path[0]]['f']['out']
                dst_port = flow_list[path[-1]]['f']['in']
                print(flow_list)
                # Reverse-direction ports come from the fiber peer-port fields.
                if len(fiber_f.keys()) == 1:
                    link_x = list(fiber_f.keys())[0]
                    #fib_x = fiber_f[link_x]
                    #rev_dst_port = self.links_dict[link_x]['fibers'][fib_x]["local_peer_port"]
                    #rev_src_port = self.links_dict[link_x]['fibers'][fib_x]["remote_peer_port"]
                    fibx = self.get_fiber_details(link_x, fiber_f[link_x])
                    rev_dst_port = fibx["local_peer_port"]
                    rev_src_port = fibx["remote_peer_port"]
                else:
                    link_in = list(fiber_f.keys())[0]
                    link_out = list(fiber_f.keys())[-1]
                    fib_inx = self.get_fiber_details(link_in, fiber_f[link_in])
                    fib_outx = self.get_fiber_details(link_out, fiber_f[link_out])
                    rev_dst_port = fib_inx["local_peer_port"]
                    rev_src_port = fib_outx["remote_peer_port"]
                    #fib_in = fiber_f[link_in]
                    #fib_out = fiber_f[link_out]
                    #rev_dst_port = self.links_dict[link_in]['fibers'][fib_in]["local_peer_port"]
                    #rev_src_port = self.links_dict[link_out]['fibers'][fib_out]["remote_peer_port"]
                # Persist the band record; the band's free-slot budget is the
                # allocated block, stored under its band_type key.
                self.optical_bands[forw_opt_band_id]["flows"] = flow_list
                self.optical_bands[forw_opt_band_id]["band_type"] = band_range
                self.optical_bands[forw_opt_band_id]["fiber_forward"] = fiber_f
                self.optical_bands[forw_opt_band_id]["fiber_backward"] = fiber_b
                self.optical_bands[forw_opt_band_id]["op-mode"] = op
                self.optical_bands[forw_opt_band_id]["n_slots"] = num_slots
                self.optical_bands[forw_opt_band_id]["links"] = links
                self.optical_bands[forw_opt_band_id]["path"] = path
                self.optical_bands[forw_opt_band_id]["band"] = band
                self.optical_bands[forw_opt_band_id]["freq"] = f0
                self.optical_bands[forw_opt_band_id]["is_active"] = True
                self.optical_bands[forw_opt_band_id]["src_port"] = src_port
                self.optical_bands[forw_opt_band_id]["dst_port"] = dst_port
                self.optical_bands[forw_opt_band_id]["rev_dst_port"] = rev_dst_port
                self.optical_bands[forw_opt_band_id]["rev_src_port"] = rev_src_port
                self.optical_bands[forw_opt_band_id][band_range] = slots_i
                self.optical_bands[forw_opt_band_id]["served_lightpaths"] = []
                '''
                if bidir:
                    self.optical_bands[back_opt_band_id]["flows"] = flow_list_b
                    self.optical_bands[back_opt_band_id]["band_type"] = band_range
                    self.optical_bands[back_opt_band_id]["fiber_forward"] = fiber_b
                    # self.optical_bands[back_opt_band_id]["fiber_backward"] = fiber_b
                    self.optical_bands[back_opt_band_id]["op-mode"] = op
                    self.optical_bands[back_opt_band_id]["n_slots"] = num_slots
                    self.optical_bands[back_opt_band_id]["links"] = rev_links
                    self.optical_bands[back_opt_band_id]["path"] = rev_path
                    self.optical_bands[back_opt_band_id]["band"] = band
                    self.optical_bands[back_opt_band_id]["freq"] = f0
                    self.optical_bands[back_opt_band_id]["is_active"] = True
                    self.optical_bands[back_opt_band_id]["src_port"] = rev_src_port
                    self.optical_bands[back_opt_band_id]["dst_port"] = rev_dst_port
                    self.optical_bands[back_opt_band_id][band_range] = slots_i.copy()
                    self.optical_bands[back_opt_band_id]["served_lightpaths"] = []
                '''
        return forw_opt_band_id, temp_links
def get_optical_bands(self, r_src, r_dst):
result = []
for ob_id in self.optical_bands:
ob = self.optical_bands[ob_id]
if debug:
print(r_src, ob["src"])
print(r_dst, ob["dst"])
print(ob)
if ob["src"] == r_src and ob["dst"] == r_dst:
result.append(ob_id)
return result
    def rsa_fs_computation(self, src, dst, rate, bidir, band):
        """Routing and Spectrum Assignment (RSA) for a flexible-grid request.

        Computes a path and assigns spectrum slots for a connection from
        *src* to *dst* at bit rate *rate*.  Behaviour depends on the
        endpoint types and on whether a band width is forced:

        - ROADM-to-ROADM: only a new optical band (OB) is provisioned, no
          lightpath/flow entry is created; returns (None, optical_band_id).
        - TP-to-TP with band None: first tries to reuse an existing OB
          between the ROADMs adjacent to the transponders; if none fits,
          a new OB is created along a freshly computed path.
        - TP-to-TP with band set: the reuse search is skipped and a new OB
          of the requested width is created.

        :param src: source node name (key into self.nodes_dict)
        :param dst: destination node name
        :param rate: requested bit rate, mapped to op-mode and slot count
            via map_rate_to_slot()
        :param bidir: whether the service is bidirectional
        :param band: requested optical-band width, or None for "full_band"
        :return: (flow_id, optical_band_id) on success; (None, ob_id) for
            ROADM-to-ROADM; (self.flow_id, []) when no path exists;
            implicitly None when no spectrum can be found at all.
        """
        num_slots_ob = "full_band"
        if band is not None:
            # An explicit band width overrides the default full-band OB size.
            num_slots_ob = map_band_to_slot(band)
            print(band, num_slots_ob)
        if self.nodes_dict[src]["type"] == "OC-ROADM" and self.nodes_dict[dst]["type"] == "OC-ROADM":
            # ROADM-to-ROADM: provision an optical band only, no lightpath.
            print("INFO: ROADM to ROADM connection")
            links, path = self.compute_path(src, dst)
            if len(path) < 1:
                # No feasible route: register null values for this OB id.
                self.null_values_ob(self.opt_band_id)
                return self.flow_id, []
            optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob)
            return None, optical_band_id
        # TP-to-TP request: allocate and pre-populate a new flow entry.
        self.flow_id += 1
        self.db_flows[self.flow_id] = {}
        self.db_flows[self.flow_id]["flow_id"] = self.flow_id
        self.db_flows[self.flow_id]["src"] = src
        self.db_flows[self.flow_id]["dst"] = dst
        self.db_flows[self.flow_id]["bitrate"] = rate
        self.db_flows[self.flow_id]["bidir"] = bidir
        print("INFO: TP to TP connection")
        if band is None:
            # Try to serve the lightpath inside an optical band that already
            # exists between the ROADMs adjacent to the two transponders.
            temp_links2 = []
            temp_path = []
            src_links = get_links_from_node(self.links_dict, src)
            dst_links = get_links_to_node(self.links_dict, dst)
            # Take the first access link on each side (TP<->ROADM).
            if len(src_links.keys()) >= 1:
                temp_links2.append(list(src_links.keys())[0])
            if len(dst_links.keys()) >= 1:
                temp_links2.append(list(dst_links.keys())[0])
            if len(temp_links2) == 2:
                # Link keys look like "<nodeA>-<nodeB>"; extract the ROADMs.
                [t_src, roadm_src] = temp_links2[0].split('-')
                [roadm_dst, t_dst] = temp_links2[1].split('-')
                temp_path.append(t_src)
                temp_path.append(roadm_src)
                temp_path.append(roadm_dst)
                temp_path.append(t_dst)
                existing_ob = self.get_optical_bands(roadm_src, roadm_dst)
                if len(existing_ob) > 0:
                    print("INFO: Evaluating existing OB {}".format(existing_ob))
                    # First check whether any existing OB can host the flow.
                    ob_found = 0  # NOTE(review): apparently unused bookkeeping flag
                    for ob_id in existing_ob:
                        op, num_slots = map_rate_to_slot(rate)
                        if debug:
                            print(temp_links2)
                        c_slots, l_slots, s_slots = self.get_slots(temp_links2, num_slots, ob_id)
                        if debug:
                            print(c_slots)
                            print(l_slots)
                            print(s_slots)
                        # A single band (C, L or S) must hold the whole request.
                        if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots:
                            flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs(temp_links2, num_slots,
                                                                                                            c_slots,
                                                                                                            l_slots, s_slots, bidir,
                                                                                                            ob_id)
                            f0, band = freqency_converter(band_range, slots)
                            if debug:
                                print(f0, band)
                            print("INFO: RSA completed for Flex Lightpath with OB already in place")
                            if flow_list is None:
                                # Port selection failed: void this flow entry
                                # and try the next candidate optical band.
                                self.null_values(self.flow_id)
                                continue
                            slots_i = []
                            for i in slots:
                                slots_i.append(int(i))
                            # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band
                            # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band
                            self.db_flows[self.flow_id]["flows"] = flow_list
                            self.db_flows[self.flow_id]["band_type"] = band_range
                            self.db_flows[self.flow_id]["slots"] = slots_i
                            self.db_flows[self.flow_id]["fiber_forward"] = fiber_f
                            self.db_flows[self.flow_id]["fiber_backward"] = fiber_b
                            self.db_flows[self.flow_id]["op-mode"] = op
                            self.db_flows[self.flow_id]["n_slots"] = num_slots
                            self.db_flows[self.flow_id]["links"] = temp_links2
                            self.db_flows[self.flow_id]["path"] = temp_path
                            self.db_flows[self.flow_id]["band"] = band
                            self.db_flows[self.flow_id]["freq"] = f0
                            self.db_flows[self.flow_id]["is_active"] = True
                            self.db_flows[self.flow_id]["parent_opt_band"] = ob_id
                            self.db_flows[self.flow_id]["new_optical_band"] = 0
                            self.optical_bands[ob_id]["served_lightpaths"].append(self.flow_id)
                            '''
                            if bidir:
                                rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"]
                                self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id)
                            '''
                            return self.flow_id, ob_id
                        else:
                            print("not enough slots")
        # Reaching this point means either no reusable OB fits (band is None)
        # or the caller forced an explicit band width: create a new OB.
        if band is None:
            print("INFO: Not existing optical-band meeting the requirements")
        else:
            print("INFO: optical-band width specified")
        #if no OB I create a new one
        links, path = self.compute_path(src, dst)
        optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob)
        op, num_slots = map_rate_to_slot(rate)
        # self.flow_id += 1
        # self.db_flows[self.flow_id] = {}
        # self.db_flows[self.flow_id]["flow_id"] = self.flow_id
        # self.db_flows[self.flow_id]["src"] = src
        # self.db_flows[self.flow_id]["dst"] = dst
        # self.db_flows[self.flow_id]["bitrate"] = rate
        # self.db_flows[self.flow_id]["bidir"] = bidir
        if debug:
            print(temp_links)
        c_slots, l_slots, s_slots = self.get_slots(temp_links, num_slots, optical_band_id)
        if debug:
            print(c_slots)
            print(l_slots)
            print(s_slots)
        # NOTE(review): unlike the reuse branch, this only checks that slots
        # exist (> 0), not that num_slots fit — confirm this is intentional.
        if len(c_slots) > 0 or len(l_slots) > 0 or len(s_slots) > 0:
            flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs(temp_links, num_slots, c_slots,
                                                                                            l_slots, s_slots, bidir, optical_band_id)
            f0, band = freqency_converter(band_range, slots)
            if debug:
                print(f0, band)
            print("INFO: RSA completed for FLex Lightpath with new OB")
            if flow_list is None:
                # Port selection failed even on the fresh OB: give up.
                self.null_values(self.flow_id)
                return self.flow_id, optical_band_id
            slots_i = []
            for i in slots:
                slots_i.append(int(i))
            self.db_flows[self.flow_id]["flows"] = flow_list
            self.db_flows[self.flow_id]["band_type"] = band_range
            self.db_flows[self.flow_id]["slots"] = slots_i
            self.db_flows[self.flow_id]["fiber_forward"] = fiber_f
            self.db_flows[self.flow_id]["fiber_backward"] = fiber_b
            self.db_flows[self.flow_id]["op-mode"] = op
            self.db_flows[self.flow_id]["n_slots"] = num_slots
            self.db_flows[self.flow_id]["links"] = temp_links
            self.db_flows[self.flow_id]["path"] = path
            self.db_flows[self.flow_id]["band"] = band
            self.db_flows[self.flow_id]["freq"] = f0
            self.db_flows[self.flow_id]["is_active"] = True
            self.db_flows[self.flow_id]["parent_opt_band"] = optical_band_id
            self.db_flows[self.flow_id]["new_optical_band"] = 1
            self.optical_bands[optical_band_id]["served_lightpaths"].append(self.flow_id)
            '''
            if bidir:
                rev_ob_id = self.optical_bands[optical_band_id]["reverse_optical_band_id"]
                self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id)
            '''
            return self.flow_id, optical_band_id
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: migrate to NetworkX:
# https://networkx.org/documentation/stable/index.html
# https://networkx.org/documentation/stable/reference/algorithms/shortest_paths.html
import sys
class Vertex:
    """A graph node for Dijkstra's algorithm.

    Adjacency maps neighbor Vertex -> [port, weight]; per-run search state
    (distance, visited, previous) is reset via reset_vertex().
    """

    def __init__(self, node):
        self.id = node
        self.adjacent = {}
        # Distance from the Dijkstra source; infinity until relaxed.
        self.distance = float("inf")
        # Mark all nodes unvisited
        self.visited = False
        # Predecessor
        self.previous = None

    # heapq compares queue items with </<= to detect duplicates: entries are
    # (distance, Vertex) tuples, so when two vertices have equal distance
    # heapq falls back to comparing the Vertex objects themselves.  These
    # orderings (by id) make that tie-break well-defined.
    def __lt__(self, other):
        return self.id < other.id

    def __le__(self, other):
        return self.id <= other.id

    def add_neighbor(self, neighbor, port):
        """Attach *neighbor*, storing its [port, weight] pair."""
        self.adjacent[neighbor] = port

    def del_neighbor(self, neighbor):
        """Detach *neighbor* (KeyError if not adjacent)."""
        self.adjacent.pop(neighbor)

    def get_connections(self):
        """Return a view of adjacent Vertex objects."""
        return self.adjacent.keys()

    def get_id(self):
        return self.id

    def get_port(self, neighbor):
        """Return the local port towards *neighbor*."""
        return self.adjacent[neighbor][0]

    def get_weight(self, neighbor):
        """Return the edge weight towards *neighbor*."""
        return self.adjacent[neighbor][1]

    def set_distance(self, dist):
        self.distance = dist

    def get_distance(self):
        return self.distance

    def set_previous(self, prev):
        self.previous = prev

    def set_visited(self):
        self.visited = True

    def reset_vertex(self):
        """Clear per-search state so the vertex can be reused in a new run."""
        self.visited = False
        self.previous = None
        self.distance = float("inf")

    def __str__(self):
        return str(self.id) + ' adjacent: ' + str([x.id for x in self.adjacent])
class Graph:
    """An undirected graph of Vertex objects keyed by node id."""

    def __init__(self):
        self.vert_dict = {}
        self.num_vertices = 0

    def __iter__(self):
        return iter(self.vert_dict.values())

    def reset_graph(self):
        """Clear Dijkstra bookkeeping on every vertex."""
        for node_id in self.vert_dict:
            self.get_vertex(node_id).reset_vertex()

    def printGraph(self):
        """Dump every edge with both ports and both weight copies."""
        for vertex in self:
            for neighbor in vertex.get_connections():
                print ('( %s , %s, %s, %s, %s, %s)' % (
                    vertex.get_id(), neighbor.get_id(),
                    vertex.get_port(neighbor), neighbor.get_port(vertex),
                    vertex.get_weight(neighbor), neighbor.get_weight(vertex)))

    def add_vertex(self, node):
        """Create, register and return a new Vertex for *node*."""
        self.num_vertices += 1
        vertex = Vertex(node)
        self.vert_dict[node] = vertex
        return vertex

    def del_Vertex(self, node):
        self.vert_dict.pop(node)

    def get_vertex(self, n):
        """Return the Vertex for *n*, or None when unknown."""
        return self.vert_dict.get(n)

    def add_edge(self, frm, to, port_frm, port_to, w):
        """Add an undirected edge, creating missing endpoints on the fly."""
        for endpoint in (frm, to):
            if endpoint not in self.vert_dict:
                self.add_vertex(endpoint)
        self.vert_dict[frm].add_neighbor(self.vert_dict[to], [port_frm, w])
        self.vert_dict[to].add_neighbor(self.vert_dict[frm], [port_to, w])

    def del_edge(self, frm, to, cost = 0):
        self.vert_dict[frm].del_neighbor(self.vert_dict[to])
        self.vert_dict[to].del_neighbor(self.vert_dict[frm])

    def get_vertices(self):
        return self.vert_dict.keys()

    def set_previous(self, current):
        self.previous = current

    def get_previous(self, current):
        return self.previous
def shortest(v, path):
    """Walk the predecessor chain from *v*, appending each ancestor id to *path*.

    The resulting list runs destination-first; callers reverse it.
    """
    if not v.previous:
        return
    path.append(v.previous.get_id())
    shortest(v.previous, path)
import heapq
def dijkstra(aGraph, start):
    """Run Dijkstra's shortest-path search over *aGraph* from *start*.

    Mutates the vertices in place: each reachable vertex ends up with its
    minimal distance set and its predecessor recorded via set_previous().
    The priority queue is a heap of (distance, Vertex) tuples, rebuilt from
    the still-unvisited vertices after every settled node.
    """
    start.set_distance(0)
    pending = [(v.get_distance(), v) for v in aGraph]
    # A heap keeps the closest unvisited vertex at the root.
    heapq.heapify(pending)
    while len(pending):
        # Settle the vertex with the smallest tentative distance.
        _, current = heapq.heappop(pending)
        current.set_visited()
        for neighbor in current.adjacent:
            if neighbor.visited:
                continue
            candidate = current.get_distance() + current.get_weight(neighbor)
            # Relax the edge when it improves the neighbor's distance.
            if candidate < neighbor.get_distance():
                neighbor.set_distance(candidate)
                neighbor.set_previous(current)
        # Rebuild the heap from scratch so updated distances take effect.
        pending = [(v.get_distance(), v) for v in aGraph if not v.visited]
        heapq.heapify(pending)
def shortest_path(graph, src, dst):
    """Return the list of node ids on the shortest path from *src* to *dst*.

    Runs dijkstra() from *src*, then follows predecessor links back from
    *dst* and reverses the collected ids into source-to-destination order.
    """
    dijkstra(graph, src)
    route = [dst.get_id()]
    shortest(dst, route)
    return route[::-1]
if __name__ == '__main__':
    # Smoke test on the classic 6-node Dijkstra example graph.
    print("Testing Algo")
    g = Graph()
    for node in ('a', 'b', 'c', 'd', 'e', 'f'):
        g.add_vertex(node)
    # add_edge(frm, to, port_frm, port_to, weight): the original calls
    # passed only (frm, to, weight) and crashed with a TypeError.  Port ids
    # do not affect path computation, so dummy ports (0) are used here.
    edges = [
        ('a', 'b', 7), ('a', 'c', 9), ('a', 'f', 14),
        ('b', 'c', 10), ('b', 'd', 15), ('c', 'd', 11),
        ('c', 'f', 2), ('d', 'e', 6), ('e', 'f', 9),
    ]
    for frm, to, weight in edges:
        g.add_edge(frm, to, 0, 0, weight)
    # Expected shortest route a -> e is ['a', 'c', 'f', 'e'] (cost 20).
    p = shortest_path(g, g.get_vertex('a'), g.get_vertex('e'))
    print(p)